From fea36935451127e85bf288f17cd7d11ee6a81e28 Mon Sep 17 00:00:00 2001 From: Gwendal Roulleau Date: Wed, 18 Jan 2023 13:48:27 +0100 Subject: [PATCH] [tts] Make the LRU media cache generic Signed-off-by: Gwendal Roulleau --- .../java/org/openhab/core/voice/TTSCache.java | 3 +- .../voice/internal/cache/AudioFormatInfo.java | 59 +++ .../cache/AudioStreamCacheWrapper.java | 188 ------- .../internal/cache/AudioStreamFromCache.java | 104 ++++ .../internal/cache/AudioStreamSupplier.java | 79 --- .../voice/internal/cache/TTSLRUCacheImpl.java | 203 ++------ .../core/voice/internal/cache/TTSResult.java | 445 ----------------- .../cache/AudioStreamCacheWrapperTest.java | 116 ----- .../internal/cache/TTSLRUCacheImplTest.java | 245 +--------- .../voice/internal/cache/TTSResultTest.java | 330 ------------- .../cache/lru/DataRetrievalException.java | 30 ++ .../cache/lru/InputStreamCacheWrapper.java | 116 +++++ .../openhab/core/cache/lru/LRUMediaCache.java | 248 ++++++++++ .../core/cache/lru/LRUMediaCacheEntry.java | 351 +++++++++++++ .../lru/InputStreamCacheWrapperTest.java | 78 +++ .../cache/lru/LRUMediaCacheEntryTest.java | 301 ++++++++++++ .../core/cache/lru/LRUMediaCacheTest.java | 461 ++++++++++++++++++ 17 files changed, 1822 insertions(+), 1535 deletions(-) create mode 100644 bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioFormatInfo.java delete mode 100644 bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamCacheWrapper.java create mode 100644 bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamFromCache.java delete mode 100644 bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamSupplier.java delete mode 100644 bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/TTSResult.java delete mode 100644 bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/AudioStreamCacheWrapperTest.java delete mode 100644 bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/TTSResultTest.java create mode 100644 bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/DataRetrievalException.java create mode 100644 bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/InputStreamCacheWrapper.java create mode 100644 bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/LRUMediaCache.java create mode 100644 bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/LRUMediaCacheEntry.java create mode 100644 bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/InputStreamCacheWrapperTest.java create mode 100644 bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/LRUMediaCacheEntryTest.java create mode 100644 bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/LRUMediaCacheTest.java diff --git a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/TTSCache.java b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/TTSCache.java index 71fc8224023..6bd532aed95 100644 --- a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/TTSCache.java +++ b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/TTSCache.java @@ -41,6 +41,5 @@ public interface TTSCache { * are not supported or another error occurs while creating an * {@link AudioStream} */ - AudioStream get(CachedTTSService tts, String text, Voice voice, AudioFormat requestedFormat) - 
throws TTSException; + AudioStream get(CachedTTSService tts, String text, Voice voice, AudioFormat requestedFormat) throws TTSException; } diff --git a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioFormatInfo.java b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioFormatInfo.java new file mode 100644 index 00000000000..d776ec820b6 --- /dev/null +++ b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioFormatInfo.java @@ -0,0 +1,59 @@ +/** + * Copyright (c) 2010-2023 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.core.voice.internal.cache; + +import org.eclipse.jdt.annotation.Nullable; +import org.openhab.core.audio.AudioFormat; + +/** + * Serializable AudioFormat storage class + * We cannot use a record yet (requires Gson v2.10) + * + * @author Gwendal Roulleau - Initial contribution + */ +public class AudioFormatInfo { + public final @Nullable Boolean bigEndian; + public final @Nullable Integer bitDepth; + public final @Nullable Integer bitRate; + public final @Nullable Long frequency; + public final @Nullable Integer channels; + public final @Nullable String codec; + public final @Nullable String container; + + public AudioFormatInfo(String text, @Nullable Boolean bigEndian, @Nullable Integer bitDepth, + @Nullable Integer bitRate, @Nullable Long frequency, @Nullable Integer channels, @Nullable String codec, + @Nullable String container) { + super(); + this.bigEndian = bigEndian; + this.bitDepth = bitDepth; + this.bitRate = bitRate; + this.frequency = frequency; + this.channels = channels; + this.codec = codec; + this.container = container; + } + + public AudioFormatInfo(AudioFormat audioFormat) { + this.bigEndian = audioFormat.isBigEndian(); + this.bitDepth = audioFormat.getBitDepth(); + this.bitRate = audioFormat.getBitRate(); + this.frequency = audioFormat.getFrequency(); + this.channels = audioFormat.getChannels(); + this.codec = audioFormat.getCodec(); + this.container = audioFormat.getContainer(); + } + + public AudioFormat toAudioFormat() { + return new AudioFormat(container, codec, bigEndian, bitDepth, bitRate, frequency, channels); + } +} diff --git a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamCacheWrapper.java b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamCacheWrapper.java deleted file mode 100644 index 52ebdec66b6..00000000000 --- a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamCacheWrapper.java +++ /dev/null @@ -1,188 +0,0 @@ -/** - * Copyright (c) 2010-2023 Contributors to the openHAB project - * - * See the NOTICE file(s) distributed with this work for additional - * information. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0 - * - * SPDX-License-Identifier: EPL-2.0 - */ -package org.openhab.core.voice.internal.cache; - -import java.io.IOException; -import java.io.InputStream; -import java.util.Objects; - -import org.eclipse.jdt.annotation.NonNullByDefault; -import org.eclipse.jdt.annotation.Nullable; -import org.openhab.core.audio.AudioException; -import org.openhab.core.audio.AudioFormat; -import org.openhab.core.audio.AudioStream; -import org.openhab.core.audio.FixedLengthAudioStream; -import org.openhab.core.voice.TTSException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * Each {@link TTSResult} instance can handle several {@link AudioStream}s. - * This class is a wrapper for such functionality and can - * ask the cached TTSResult for data, allowing concurrent access to - * the audio stream even if it is currently actively read from the TTS service. - * If the cached TTSResult is faulty, then it can take data from the - * fallback supplier (which should be a direct call to the TTS service). - * This class implements the two main read methods (byte by byte, and with an array) - * - * @author Gwendal Roulleau - Initial contribution - */ -@NonNullByDefault -public class AudioStreamCacheWrapper extends FixedLengthAudioStream { - - private final Logger logger = LoggerFactory.getLogger(AudioStreamCacheWrapper.class); - - private TTSResult ttsResult; - private int offset = 0; - - // A fallback mechanism : if reading from the cache fails, - // It will therefore try to fallback to a direct tts stream - private AudioStreamSupplier fallbackDirectSupplier; - private @Nullable AudioStream fallbackDirectAudioStream; - - /*** - * Construct a transparent AudioStream wrapper around the data from the cache. - * - * @param ttsResult The parent cached {@link TTSResult} - * @param supplier A fallback {@link AudioStreamSupplier}, if something goes wrong with the cache mechanism - */ - public AudioStreamCacheWrapper(TTSResult ttsResult, AudioStreamSupplier supplier) { - this.ttsResult = ttsResult; - this.fallbackDirectSupplier = supplier; - } - - @Override - public AudioFormat getFormat() { - return ttsResult.getAudioFormat(); - } - - @Override - public int available() throws IOException { - return ttsResult.availableFrom(offset); - } - - @Override - public int read() throws IOException { - if (fallbackDirectAudioStream == null) { - try { - byte[] bytesRead = ttsResult.read(offset, 1); - if (bytesRead.length == 0) { - return -1; - } else { - offset++; - return bytesRead[0] & 0xff; - } - } catch (IOException e) { - logger.debug("Cannot read from tts cache. 
Will use the fallback TTS mechanism", e); - } - } - - // beyond this point, we failed, so the fallback must be active : - enableFallback(); - AudioStream fallbackDirectAudioStreamLocal = fallbackDirectAudioStream; - if (fallbackDirectAudioStreamLocal != null) { - return fallbackDirectAudioStreamLocal.read(); - } - - throw new IOException("Neither TTS cache nor TTS fallback method succeed"); - } - - @Override - public int read(byte @Nullable [] b, int off, int len) throws IOException { - if (fallbackDirectAudioStream == null) { - if (b == null) { - throw new IOException("Array to write is null"); - } - Objects.checkFromIndexSize(off, len, b.length); - - if (len == 0) { - return 0; - } - - try { - byte[] bytesRead = ttsResult.read(offset, len); - offset += bytesRead.length; - if (bytesRead.length == 0) { - return -1; - } - int i = 0; - for (; i < len && i < bytesRead.length; i++) { - b[off + i] = bytesRead[i]; - } - return i; - } catch (IOException e) { - logger.debug("Cannot read from tts cache. Will use the fallback TTS mechanism", e); - } - } - - // beyond this point, we failed, so fallback must be active : - enableFallback(); - AudioStream fallbackDirectAudioStreamLocal = fallbackDirectAudioStream; - if (fallbackDirectAudioStreamLocal != null) { - return fallbackDirectAudioStreamLocal.read(b, off, len); - } - - throw new IOException("Neither TTS cache nor TTS fallback method succeed"); - } - - @Override - public long skip(long n) throws IOException { - offset += n; - return n; - } - - @Override - public void close() throws IOException { - try { - super.close(); - } finally { - ttsResult.closeAudioStreamClient(); - } - } - - private void enableFallback() throws IOException { - if (fallbackDirectAudioStream == null) { - try { - AudioStream fallBackDirectResolutionLocal = fallbackDirectSupplier.fallBackDirectResolution(); - this.fallbackDirectAudioStream = fallBackDirectResolutionLocal; - fallBackDirectResolutionLocal.skip(offset); - } catch (TTSException e) { - throw new IOException("Cannot read from TTS service", e); - } - } - } - - @Override - public long length() { - Long totalSize = ttsResult.getTotalSize(); - if (totalSize > 0L) { - return totalSize; - } - try { - enableFallback(); - - AudioStream fallbackDirectAudioStreamLocal = this.fallbackDirectAudioStream; - if (fallbackDirectAudioStreamLocal instanceof FixedLengthAudioStream fixedLengthAudioStream) { - return fixedLengthAudioStream.length(); - } - } catch (IOException e) { - logger.debug("Cannot get the length of the AudioStream"); - } - return 0; - } - - @Override - public InputStream getClonedStream() throws AudioException { - return ttsResult.getAudioStream(fallbackDirectSupplier); - } -} diff --git a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamFromCache.java b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamFromCache.java new file mode 100644 index 00000000000..1cc79ea458a --- /dev/null +++ b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamFromCache.java @@ -0,0 +1,104 @@ +/** + * Copyright (c) 2010-2023 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.core.voice.internal.cache; + +import java.io.IOException; +import java.io.InputStream; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.openhab.core.audio.AudioException; +import org.openhab.core.audio.AudioFormat; +import org.openhab.core.audio.FixedLengthAudioStream; +import org.openhab.core.cache.lru.InputStreamCacheWrapper; + +/** + * Implements AudioStream methods, with an inner stream extracted from cache + * + * @author Gwendal Roulleau - Initial contribution + */ +@NonNullByDefault +public class AudioStreamFromCache extends FixedLengthAudioStream { + + private InputStreamCacheWrapper inputStream; + private AudioFormat audioFormat; + + public AudioStreamFromCache(InputStreamCacheWrapper inputStream, AudioFormatInfo audioFormat) { + this.inputStream = inputStream; + this.audioFormat = audioFormat.toAudioFormat(); + } + + @Override + public int read() throws IOException { + return inputStream.read(); + } + + @Override + public int read(byte @Nullable [] b, int off, int len) throws IOException { + return inputStream.read(b, off, len); + } + + @Override + public AudioFormat getFormat() { + return audioFormat; + } + + @Override + public long length() { + return inputStream.length(); + } + + @Override + public InputStream getClonedStream() throws AudioException { + try { + return inputStream.getClonedStream(); + } catch (IOException e) { + throw new AudioException("Cannot get cloned AudioStream", e); + } + } + + @Override + public void close() throws IOException { + inputStream.close(); + } + + @Override + public long skip(long n) throws IOException { + return inputStream.skip(n); + } + + @Override + public void skipNBytes(long n) throws IOException { + inputStream.skipNBytes(n); + } + + @Override + public int available() throws IOException { + return inputStream.available(); + } + + @Override + public synchronized void mark(int readlimit) { + inputStream.mark(readlimit); + } + + @Override + public synchronized void reset() throws IOException { + inputStream.reset(); + } + + @Override + public boolean markSupported() { + return inputStream.markSupported(); + } +} diff --git a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamSupplier.java b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamSupplier.java deleted file mode 100644 index 1655ab7a916..00000000000 --- a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/AudioStreamSupplier.java +++ /dev/null @@ -1,79 +0,0 @@ -/** - * Copyright (c) 2010-2023 Contributors to the openHAB project - * - * See the NOTICE file(s) distributed with this work for additional - * information. 
- * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0 - * - * SPDX-License-Identifier: EPL-2.0 - */ -package org.openhab.core.voice.internal.cache; - -import org.eclipse.jdt.annotation.NonNullByDefault; -import org.openhab.core.audio.AudioFormat; -import org.openhab.core.audio.AudioStream; -import org.openhab.core.voice.TTSException; -import org.openhab.core.voice.TTSService; -import org.openhab.core.voice.Voice; - -/** - * Custom supplier class to defer synthesizing with a TTS service - * This allows calling the {@link TTSService} by the client reading the stream only when needed - * and thus not blocking the caching service during its standard operation get/push - * - * @author Gwendal Roulleau - Initial Contribution - * - */ -@NonNullByDefault -public class AudioStreamSupplier { - - private final CachedTTSService tts; - private final String text; - private final Voice voice; - private final AudioFormat requestedAudioFormat; - private boolean resolved = false; - - public AudioStreamSupplier(CachedTTSService tts, String text, Voice voice, AudioFormat requestedAudioFormat) { - super(); - this.tts = tts; - this.text = text; - this.voice = voice; - this.requestedAudioFormat = requestedAudioFormat; - } - - public boolean isResolved() { - return resolved; - } - - public String getText() { - return text; - } - - /** - * Resolve this supplier. You should use this method only once. - * - * @return the {@link AudioStream} - * @throws TTSException - */ - public AudioStream resolve() throws TTSException { - if (resolved) { - throw new TTSException("This TTS request result have already been supplied"); - } - AudioStream audioStream = tts.synthesizeForCache(text, voice, requestedAudioFormat); - resolved = true; - return audioStream; - } - - /** - * If, for any unrecoverable reason, the cache fails, use this method to get the TTS {@link AudioStream} directly - * - * @return the AudioStream - * @throws TTSException - */ - public AudioStream fallBackDirectResolution() throws TTSException { - return tts.synthesizeForCache(text, voice, requestedAudioFormat); - } -} diff --git a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/TTSLRUCacheImpl.java b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/TTSLRUCacheImpl.java index abe4fc63357..821bca7ad2f 100644 --- a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/TTSLRUCacheImpl.java +++ b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/TTSLRUCacheImpl.java @@ -1,5 +1,5 @@ /** - * Copyright (c) 2010-2022 Contributors to the openHAB project + * Copyright (c) 2010-2023 Contributors to the openHAB project * * See the NOTICE file(s) distributed with this work for additional * information. 
@@ -12,36 +12,25 @@ */ package org.openhab.core.voice.internal.cache; -import java.io.File; import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.file.FileStore; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.Paths; -import java.util.Iterator; -import java.util.LinkedHashMap; -import java.util.List; +import java.io.InputStream; import java.util.Map; -import java.util.Map.Entry; -import java.util.concurrent.locks.ReentrantLock; -import java.util.stream.Collectors; import org.eclipse.jdt.annotation.NonNullByDefault; import org.eclipse.jdt.annotation.Nullable; -import org.openhab.core.OpenHAB; import org.openhab.core.audio.AudioFormat; import org.openhab.core.audio.AudioStream; +import org.openhab.core.cache.lru.DataRetrievalException; +import org.openhab.core.cache.lru.InputStreamCacheWrapper; +import org.openhab.core.cache.lru.LRUMediaCache; +import org.openhab.core.cache.lru.LRUMediaCacheEntry; import org.openhab.core.config.core.ConfigParser; -import org.openhab.core.storage.Storage; import org.openhab.core.storage.StorageService; import org.openhab.core.voice.TTSCache; import org.openhab.core.voice.TTSException; import org.openhab.core.voice.TTSService; import org.openhab.core.voice.Voice; import org.openhab.core.voice.internal.VoiceManagerImpl; -import org.openhab.core.voice.internal.cache.TTSResult.AudioFormatInfo; import org.osgi.service.component.annotations.Activate; import org.osgi.service.component.annotations.Component; import org.osgi.service.component.annotations.Modified; @@ -69,17 +58,9 @@ public class TTSLRUCacheImpl implements TTSCache { static final String CONFIG_CACHE_SIZE_TTS = "cacheSizeTTS"; static final String CONFIG_ENABLE_CACHE_TTS = "enableCacheTTS"; - public static final String SOUND_EXT = ".snd"; - static final String VOICE_TTS_CACHE_PID = "org.openhab.voice.tts"; - private static final String CACHE_FOLDER_NAME = "cache"; - - final LinkedHashMap ttsResultMap; - /** - * Lock the cache to handle concurrency - */ - private final ReentrantLock lock = new ReentrantLock(); + private @Nullable LRUMediaCache lruMediaCache; /** * The size limit, in bytes. The size is not a hard one, because the final size of the @@ -88,18 +69,14 @@ public class TTSLRUCacheImpl implements TTSCache { protected long cacheSizeTTS = DEFAULT_CACHE_SIZE_TTS * 1024; protected boolean enableCacheTTS = true; - private final Path cacheFolder; - - private Storage storage; + private StorageService storageService; /** * Constructs a cache system for TTS result. 
*/ @Activate public TTSLRUCacheImpl(@Reference StorageService storageService, Map config) { - this.storage = storageService.getStorage(VOICE_TTS_CACHE_PID, this.getClass().getClassLoader()); - this.ttsResultMap = new LinkedHashMap<>(20, .75f, true); - cacheFolder = Path.of(OpenHAB.getUserDataFolder(), CACHE_FOLDER_NAME, VOICE_TTS_CACHE_PID); + this.storageService = storageService; activate(config); } @@ -115,144 +92,56 @@ protected void activate(Map config) { DEFAULT_CACHE_SIZE_TTS) * 1024; if (enableCacheTTS) { - try { - // creating directory if needed : - logger.debug("Creating TTS cache folder '{}'", cacheFolder); - Files.createDirectories(cacheFolder); - - // check if we have enough space : - if (getFreeSpace() < (cacheSizeTTS * 2)) { - enableCacheTTS = false; - logger.warn("Not enough space for the TTS cache"); - } - logger.debug("Using TTS cache folder '{}'", cacheFolder); - - cleanCacheDirectory(); - loadAll(); - } catch (IOException | SecurityException e) { - logger.error("Cannot initialize the TTS cache folder. Reason: {}", e.getMessage()); - enableCacheTTS = false; - } - } - } - - private void cleanCacheDirectory() throws IOException { - try { - List<@Nullable Path> filesInCacheFolder = Files.list(cacheFolder).collect(Collectors.toList()); - - // 1 delete empty files - for (Path path : filesInCacheFolder) { - if (path != null) { - File file = path.toFile(); - if (file.length() == 0) { - file.delete(); - } - } - } - - // 2 clean orphan (part of a pair (sound + info files) without a corresponding partner) - // 2-a delete sound files without AudioFormatInfo - for (Path path : filesInCacheFolder) { - if (path != null) { - String fileName = path.getFileName().toString(); - if (fileName.endsWith(SOUND_EXT)) { - String fileNameWithoutExtension = fileName.replaceAll("\\.\\w+$", ""); - ; - // check corresponding AudioFormatInfo in storage - AudioFormatInfo audioFormatInfo = storage.get(fileNameWithoutExtension); - if (audioFormatInfo == null) { - Files.delete(path); - } - } - } - } - // 2-b delete AudioFormatInfo without corresponding sound file - for (Entry entry : storage.stream().toList()) { - Path correspondingAudioFile = cacheFolder.resolve(entry.getKey() + SOUND_EXT); - if (!Files.exists(correspondingAudioFile)) { - storage.remove(entry.getKey()); - } - } - } catch ( - - IOException e) { - logger.warn("Cannot load the TTS cache directory : {}", e.getMessage()); - return; - } - } - - private long getFreeSpace() { - try { - URI rootURI = new URI("file:///"); - Path rootPath = Paths.get(rootURI); - Path dirPath = rootPath.resolve(cacheFolder.getParent()); - FileStore dirFileStore = Files.getFileStore(dirPath); - return dirFileStore.getUsableSpace(); - } catch (URISyntaxException | IOException e) { - logger.error("Cannot compute free disk space for the TTS cache. 
Reason: {}", e.getMessage()); - return 0; + this.lruMediaCache = new LRUMediaCache<>(storageService, cacheSizeTTS, VOICE_TTS_CACHE_PID, + this.getClass().getClassLoader()); } } @Override public AudioStream get(CachedTTSService tts, String text, Voice voice, AudioFormat requestedFormat) throws TTSException { - if (!enableCacheTTS) { + + LRUMediaCache lruMediaCacheLocal = lruMediaCache; + if (!enableCacheTTS || lruMediaCacheLocal == null) { return tts.synthesizeForCache(text, voice, requestedFormat); } - // initialize the supplier stream from the TTS service tts - AudioStreamSupplier ttsSynthesizerSupplier = new AudioStreamSupplier(tts, text, voice, requestedFormat); - lock.lock(); // (a get operation also need the lock as it will update the head of the cache) + String key = tts.getClass().getSimpleName() + "_" + tts.getCacheKey(text, voice, requestedFormat); + LRUMediaCacheEntry fileAndMetadata = lruMediaCacheLocal.get(key, () -> { + AudioStream audioInputStream; + try { + audioInputStream = tts.synthesizeForCache(text, voice, requestedFormat); + } catch (TTSException e) { + throw new DataRetrievalException("Cannot compute TTS", e); + } + return new LRUMediaCacheEntry(key, audioInputStream, + new AudioFormatInfo(audioInputStream.getFormat())); + }); + try { - String key = tts.getClass().getSimpleName() + "_" + tts.getCacheKey(text, voice, requestedFormat); - // try to get from cache - TTSResult ttsResult = ttsResultMap.get(key); - if (ttsResult == null || !ttsResult.getText().equals(text)) { // it's a cache miss or a false positive, we - // must (re)create it - logger.debug("Cache miss {}", key); - ttsResult = new TTSResult(cacheFolder, key, storage, ttsSynthesizerSupplier); - ttsResultMap.put(key, ttsResult); + InputStream inputStream = fileAndMetadata.getInputStream(); + AudioFormatInfo metadata = fileAndMetadata.getMetadata(); + if (metadata == null) { + throw new IllegalStateException("Cannot have an audio input stream without audio format information"); + } + if (inputStream instanceof InputStreamCacheWrapper inputStreamCacheWrapper) { + // we are sure that the cache is used, and so we can use an AudioStream + // implementation that use convenient methods for some client, like getClonedStream() + // or mark /reset + return new AudioStreamFromCache(inputStreamCacheWrapper, metadata); } else { - logger.debug("Cache hit {}", key); + // the cache is not used, we can use the original response AudioStream + return (AudioStream) fileAndMetadata.getInputStream(); } - return ttsResult.getAudioStream(ttsSynthesizerSupplier); - } finally { - lock.unlock(); - } - } - - void put(@Nullable TTSResult ttsResult) { - if (ttsResult != null) { - ttsResultMap.put(ttsResult.getKey(), ttsResult); - } - makeSpace(); - } - - /** - * Load all {@link TTSResult} cached to the disk. - */ - private void loadAll() throws IOException { - ttsResultMap.clear(); - storage.stream().map(entry -> new TTSResult(cacheFolder, entry.getKey(), storage)) - .filter(ttsR -> !ttsR.getText().isBlank()).forEach(this::put); - } - - /** - * Check if the cache is not already full and make space if needed. - * We don't use the removeEldestEntry test method from the linkedHashMap because it can only remove one element. - */ - private void makeSpace() { - Iterator<@Nullable TTSResult> iterator = ttsResultMap.values().iterator(); - Long cacheSize = ttsResultMap.values().stream().map(ttsR -> ttsR == null ? 
0 : ttsR.getCurrentSize()).reduce(0L, - (Long::sum)); - while (cacheSize > cacheSizeTTS && ttsResultMap.size() > 1) { - TTSResult oldestEntry = iterator.next(); - if (oldestEntry != null) { - oldestEntry.deleteFiles(); - cacheSize -= oldestEntry.getTotalSize(); + } catch (IOException e) { + logger.warn("Cannot get audio from cache, fallback to TTS service"); + return tts.synthesizeForCache(text, voice, requestedFormat); + } catch (DataRetrievalException de) { + if (de.getCause()instanceof TTSException ttsException) { + throw ttsException; + } else { + throw de; } - iterator.remove(); } } } diff --git a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/TTSResult.java b/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/TTSResult.java deleted file mode 100644 index caee81c6990..00000000000 --- a/bundles/org.openhab.core.voice/src/main/java/org/openhab/core/voice/internal/cache/TTSResult.java +++ /dev/null @@ -1,445 +0,0 @@ -/** - * Copyright (c) 2010-2022 Contributors to the openHAB project - * - * See the NOTICE file(s) distributed with this work for additional - * information. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0 - * - * SPDX-License-Identifier: EPL-2.0 - */ -package org.openhab.core.voice.internal.cache; - -import java.io.File; -import java.io.IOException; -import java.nio.ByteBuffer; -import java.nio.channels.FileChannel; -import java.nio.file.Path; -import java.nio.file.StandardOpenOption; -import java.util.EnumSet; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; - -import org.eclipse.jdt.annotation.NonNullByDefault; -import org.eclipse.jdt.annotation.Nullable; -import org.openhab.core.audio.AudioFormat; -import org.openhab.core.audio.AudioStream; -import org.openhab.core.audio.FixedLengthAudioStream; -import org.openhab.core.storage.Storage; -import org.openhab.core.voice.TTSException; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -/** - * A cached sound resulting from a call to a TTSService - * This class adds also the capability to serve multiple sinks concurrently - * without asking already retrieved data to the wrapped stream. - * - * @author Gwendal Roulleau - Initial contribution - */ -@NonNullByDefault -public class TTSResult { - - private final Logger logger = LoggerFactory.getLogger(TTSResult.class); - - /** - * Arbitrary chunk size. Small is less latency but more small calls and CPU load. - */ - private static final int CHUNK_SIZE = 10000; - - /** - * Take count of the number of {@link AudioStreamCacheWrapper} currently using this {@link TTSResult} - */ - private int countAudioStreamClient = 0; - - /** - * A unique key to identify the result - * (used to build the filename) - */ - private final String key; - - private final String text; - - // The inner AudioStream, with a supplier - private @Nullable AudioStreamSupplier ttsAudioStreamSupplier; - private @Nullable AudioStream ttsAudioStream; - private @Nullable AudioFormat audioFormat; - - // The cache files where the sound and its info are stored : - private final File soundFile; - private final Storage storage; - private long currentSize = 0; - private boolean completed; - - private @Nullable FileChannel fileChannel; - private final Lock fileOperationLock = new ReentrantLock(); - - /** - * This constructor is used when the file is fully cached on disk. 
- * The file on disk will provide the data, and the additional file will - * provide metadata. - * - * @param cacheDirectory Where are the cached file stored - * @param key a unique key - * @param storate a storage backend to store metadata - */ - public TTSResult(Path cacheDirectory, String key, Storage storage) { - this.key = key; - this.soundFile = cacheDirectory.resolve(key + TTSLRUCacheImpl.SOUND_EXT).toFile(); - this.storage = storage; - this.ttsAudioStreamSupplier = null; - this.completed = true; - AudioFormatInfo audioFormatInfo = storage.get(key); - if (soundFile.length() == 0 || audioFormatInfo == null) { - this.text = ""; - return; - } - this.currentSize = soundFile.length(); - this.text = audioFormatInfo.text; - this.audioFormat = new AudioFormat(audioFormatInfo.container, audioFormatInfo.codec, audioFormatInfo.bigEndian, - audioFormatInfo.bitDepth, audioFormatInfo.bitRate, audioFormatInfo.frequency, audioFormatInfo.channels); - } - - /*** - * This constructor is used when the file is not yet cached on disk. - * The {@link AudioStreamSupplier} will provide the data. - * - * @param cacheDirectory The cache folder - * @param key A unique key to identify the produced TTS sound - * @param ttsSynthesizerSupplier The {@link AudioStreamSupplier} for the result we want to cache, from the TTS - * service - */ - public TTSResult(Path cacheDirectory, String key, Storage storage, - AudioStreamSupplier ttsSynthesizerSupplier) { - this.key = key; - this.text = ttsSynthesizerSupplier.getText(); - this.soundFile = cacheDirectory.resolve(key + TTSLRUCacheImpl.SOUND_EXT).toFile(); - this.storage = storage; - this.ttsAudioStreamSupplier = ttsSynthesizerSupplier; - this.completed = false; - } - - /** - * Get total size of the underlying sound stream. - * If not already completed, will query the stream inside, - * or get all the data. - * - * @return - */ - protected Long getTotalSize() { - if (completed) { // we already know the total size of the sound - return currentSize; - } else { - // first try to check if the inner stream has the information - AudioStream ttsAudioStreamLocal = ttsAudioStream; - if (ttsAudioStreamLocal != null - && ttsAudioStreamLocal instanceof FixedLengthAudioStream fixedLengthAudioStream) { - return fixedLengthAudioStream.length(); - } - // else, we must force-read all the stream to get the real size - try { - read(0, Integer.MAX_VALUE); - } catch (IOException e) { - logger.debug("Cannot read the total size of the TTS result. 
Using 0", e); - } - return currentSize; - } - } - - /** - * Get the current size - * - * @return - */ - public long getCurrentSize() { - return currentSize; - } - - protected void setSize(long size) { - this.currentSize = size; - } - - public String getText() { - return text; - } - - public String getKey() { - return key; - } - - /** - * Open an openHAB cached AudioStream wrapped around the file - * There could be several clients AudioStream on the same TTSResult - * - * @param fallbackAudioStreamSupplier If something goes wrong with the cache, this supplier will provide the - * AudioStream directly from the TTS service - * - * @return An {@link AudioStream} that can be used to play sound - */ - protected AudioStream getAudioStream(AudioStreamSupplier fallbackAudioStreamSupplier) { - logger.debug("Trying to open a cache audiostream client for {}", soundFile.getName()); - - // if this TTSResult was loaded from file, take the opportunity to record the supplier : - if (ttsAudioStreamSupplier == null) { - ttsAudioStreamSupplier = fallbackAudioStreamSupplier; - } - - fileOperationLock.lock(); - try { - countAudioStreamClient++; - // we could have to open the fileChannel - FileChannel fileChannelLocal = fileChannel; - if (fileChannelLocal == null || !soundFile.exists()) { - try { - fileChannelLocal = FileChannel.open(soundFile.toPath(), - EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE)); - fileChannel = fileChannelLocal; - // if the file size is 0 but the completed boolean is true, THEN it means the file have - // been deleted. We must mark the file as to be recreated by reseting everything : - if (completed && fileChannelLocal.size() == 0) { - logger.debug("The cached sound file {} is not present anymore. We have to recreate it", - soundFile.getName()); - resetStream(fallbackAudioStreamSupplier); - } - } catch (IOException e) { - logger.info("Cannot open the TTS cached fileChannel for file {}", soundFile.getName()); - } - } - } finally { - fileOperationLock.unlock(); - } - return new AudioStreamCacheWrapper(this, fallbackAudioStreamSupplier); - } - - private void resetStream(AudioStreamSupplier fallbackAudioStreamSupplier) { - completed = false; - currentSize = 0; - ttsAudioStream = null; - ttsAudioStreamSupplier = fallbackAudioStreamSupplier; - } - - /** - * This method is called by a wrapper when it has been closed by a client - * The file and the inner stream could then be closed, if and only if no other client are accessing it. - * - * @throws IOException - */ - protected void closeAudioStreamClient() throws IOException { - logger.debug("Trying to close a cached audiostream client for {}", soundFile.getName()); - fileOperationLock.lock(); - try { - countAudioStreamClient--; - if (countAudioStreamClient <= 0) {// no more client reading or writing : closing the - // filechannel - try { - FileChannel fileChannelLocal = fileChannel; - if (fileChannelLocal != null) { - try { - logger.debug("Effectively close the TTS cache filechannel for {}", soundFile.getName()); - fileChannelLocal.close(); - } finally { - fileChannel = null; - } - } - } finally { - AudioStream ttsAudioStreamLocal = ttsAudioStream; - if (ttsAudioStreamLocal != null) { - ttsAudioStreamLocal.close(); - } - } - } - } finally { - fileOperationLock.unlock(); - } - } - - /** - * This method is called when we can't defer anymore the call to the TTS service. 
- * The request is then resolved to get the real AudioStream - * - * @throws TTSException - */ - private void resolveAudioStreamSupplier() throws TTSException { - AudioStreamSupplier audioStreamSupplierLocal = ttsAudioStreamSupplier; - if (ttsAudioStream == null && audioStreamSupplierLocal != null && !audioStreamSupplierLocal.isResolved()) { - logger.trace("Trying to synchronize for resolving supplier"); - synchronized (audioStreamSupplierLocal) { - if (!audioStreamSupplierLocal.isResolved()) { // test again after getting the lock - try { - AudioStream audioStreamResolved = audioStreamSupplierLocal.resolve(); - ttsAudioStream = audioStreamResolved; - AudioFormat audioFormatFromTTSAudioStream = audioStreamResolved.getFormat(); - this.audioFormat = audioFormatFromTTSAudioStream; - // now that we get the real response format, we can store the metadata - storeInfo(audioFormatFromTTSAudioStream); - } catch (IOException e) { - throw new TTSException("Cannot create TTS cache file", e); - } - } - } - } - } - - /** - * Get AudioFormat for this TTSResult. - * Check the underlying AudioStream if the metadata is not available - * - * @return - */ - protected AudioFormat getAudioFormat() { - AudioFormat audioFormatLocal = this.audioFormat; - if (audioFormatLocal != null) { - return audioFormatLocal; - } else { - try { - resolveAudioStreamSupplier(); - } catch (TTSException e) { - logger.warn("Cannot get or store audio format from the TTS audio service: {}", e.getMessage()); - } - audioFormatLocal = audioFormat; - if (audioFormatLocal == null) { // should't happen : resolve and synthezise MUST fill it - logger.warn("Cannot get audio format for TTS sound file {}. Assuming WAV", soundFile.getName()); - return AudioFormat.WAV; - } - return audioFormatLocal; - } - } - - /** - * Read from the cached file. If there is not enough bytes to read in the file, the AudioStream from the TTS service - * will be queried. - * - * @param start The offset to read the file from - * @param sizeToRead the number of byte to read - * @return A byte array from the file. The size may or may not be the sizeToRead requested - * @throws IOException - */ - protected byte[] read(int start, int sizeToRead) throws IOException { - FileChannel fileChannelLocal = fileChannel; - if (fileChannelLocal == null) { - throw new IOException("Cannot read TTS cache from null file channel. Shouldn't happen"); - } - try { - // check if we need to get data from the inner stream. 
Note : if completeSize != null, then the end of - // stream has already been reached : - if (start + sizeToRead > fileChannelLocal.size() && !completed) { - logger.trace("Maybe need to get data from inner stream"); - resolveAudioStreamSupplier(); - // try to get new bytes from the inner stream - AudioStream ttsAudioStreamLocal = ttsAudioStream; - if (ttsAudioStreamLocal != null) { - logger.trace("Trying to synchronize for reading inner audiostream"); - synchronized (ttsAudioStreamLocal) { - // now that we really have the lock, test again if we really need data from the stream - while (start + sizeToRead > fileChannelLocal.size() && !completed) { - logger.trace("Really need to get data from inner stream"); - byte[] readFromTTS = ttsAudioStreamLocal.readNBytes(CHUNK_SIZE); - if (readFromTTS.length == 0) { // we read all the stream - logger.trace("End of the stream reached"); - completed = true; - } else { - fileChannelLocal.write(ByteBuffer.wrap(readFromTTS), currentSize); - logger.trace("writing {} bytes to {}", readFromTTS.length, soundFile.getName()); - currentSize += readFromTTS.length; - } - } - } - } - } - // the cache file is now filled, get bytes from it. - long maxToRead = Math.min(currentSize, sizeToRead); - ByteBuffer byteBufferFromChannelFile = ByteBuffer.allocate((int) maxToRead); - int byteReadNumber = fileChannelLocal.read(byteBufferFromChannelFile, Integer.valueOf(start).longValue()); - logger.debug("Read {} bytes from the filechannel", byteReadNumber); - if (byteReadNumber > 0) { - byte[] resultByteArray = new byte[byteReadNumber]; - byteBufferFromChannelFile.rewind(); - byteBufferFromChannelFile.get(resultByteArray); - return resultByteArray; - } else { - return new byte[0]; - } - } catch (TTSException e) { - throw new IOException("Cannot read byte from the TTS service", e); - } - } - - /** - * Return the number of bytes that we can actually read without calling - * the underlying stream - * - * @param offset - * @return - */ - protected int availableFrom(int offset) { - FileChannel fileChannelLocal = fileChannel; - if (fileChannelLocal == null) { - return 0; - } - try { - return Math.max(0, Long.valueOf(fileChannelLocal.size() - offset).intValue()); - } catch (IOException e) { - logger.debug("Cannot get file length for TTS sound file {}", soundFile.getName()); - return 0; - } - } - - /** - * Create the metadata file alongside - * - * @param responseFormat The audio format effectively produced by the TTS - * @throws IOException - */ - private void storeInfo(AudioFormat responseFormat) throws IOException { - AudioFormatInfo audioFormatInfo = new AudioFormatInfo(text, responseFormat.isBigEndian(), - responseFormat.getBitDepth(), responseFormat.getBitRate(), responseFormat.getFrequency(), - responseFormat.getChannels(), responseFormat.getCodec(), responseFormat.getContainer()); - storage.put(key, audioFormatInfo); - - } - - public void deleteFiles() { - logger.debug("Receiving call to delete the audio file {}", soundFile.getName()); - fileOperationLock.lock(); - try { - // check if a client is actually reading the file - if (countAudioStreamClient <= 0) { - logger.debug("Effectively deleting the audio file {}", soundFile.getName()); - // delete the sound file : - soundFile.delete(); - // and the associated info - storage.remove(key); - } - } finally { - fileOperationLock.unlock(); - } - } - - // We cannot use a record yet (requires Gson v2.10) - public static class AudioFormatInfo { - public final String text; - public final @Nullable Boolean bigEndian; - public final 
@Nullable Integer bitDepth; - public final @Nullable Integer bitRate; - public final @Nullable Long frequency; - public final @Nullable Integer channels; - public final @Nullable String codec; - public final @Nullable String container; - - public AudioFormatInfo(String text, @Nullable Boolean bigEndian, @Nullable Integer bitDepth, - @Nullable Integer bitRate, @Nullable Long frequency, @Nullable Integer channels, @Nullable String codec, - @Nullable String container) { - super(); - this.text = text; - this.bigEndian = bigEndian; - this.bitDepth = bitDepth; - this.bitRate = bitRate; - this.frequency = frequency; - this.channels = channels; - this.codec = codec; - this.container = container; - } - } -} diff --git a/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/AudioStreamCacheWrapperTest.java b/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/AudioStreamCacheWrapperTest.java deleted file mode 100644 index 12953b70e0a..00000000000 --- a/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/AudioStreamCacheWrapperTest.java +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Copyright (c) 2010-2023 Contributors to the openHAB project - * - * See the NOTICE file(s) distributed with this work for additional - * information. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0 - * - * SPDX-License-Identifier: EPL-2.0 - */ -package org.openhab.core.voice.internal.cache; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.ArgumentMatchers.anyInt; -import static org.mockito.Mockito.*; - -import java.io.IOException; - -import org.eclipse.jdt.annotation.NonNullByDefault; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.junit.jupiter.MockitoExtension; -import org.openhab.core.audio.AudioStream; -import org.openhab.core.voice.TTSException; - -/** - * Test the wrapper stream in the cache system - * - * @author Gwendal Roulleau - Initial contribution - */ -@NonNullByDefault -@ExtendWith(MockitoExtension.class) -public class AudioStreamCacheWrapperTest { - - private @Mock @NonNullByDefault({}) TTSResult ttsResultMock; - - /** - * Test the read() method - * - * @throws IOException - */ - @Test - public void cacheWrapperStreamTest() throws IOException { - AudioStreamSupplier mockedAudioStreamSupplier = Mockito.mock(AudioStreamSupplier.class); - when(ttsResultMock.read(0, 1)).thenReturn(new byte[] { 1 }); - when(ttsResultMock.read(1, 1)).thenReturn(new byte[] { 2 }); - when(ttsResultMock.read(2, 1)).thenReturn(new byte[] { 3 }); - when(ttsResultMock.read(3, 1)).thenReturn(new byte[0]); - - try (AudioStreamCacheWrapper audioStreamCacheWrapper = new AudioStreamCacheWrapper(ttsResultMock, - mockedAudioStreamSupplier)) { - assertEquals(1, audioStreamCacheWrapper.read()); - assertEquals(2, audioStreamCacheWrapper.read()); - assertEquals(3, audioStreamCacheWrapper.read()); - assertEquals(-1, audioStreamCacheWrapper.read()); - } - - verify(ttsResultMock, times(4)).read(anyInt(), anyInt()); - verify(ttsResultMock).closeAudioStreamClient(); - verifyNoMoreInteractions(ttsResultMock); - } - - /** - * Test the read by batch method - * - * @throws IOException - */ - @Test - public void cacheWrapperStreamReadBunchTest() throws IOException { - 
when(ttsResultMock.read(anyInt(), anyInt())).thenReturn(new byte[] { 1 }, new byte[] { 2, 3 }, new byte[0]); - - AudioStreamSupplier mockedAudioStreamSupplier = Mockito.mock(AudioStreamSupplier.class); - try (AudioStreamCacheWrapper audioStreamCacheWrapper = new AudioStreamCacheWrapper(ttsResultMock, - mockedAudioStreamSupplier)) { - assertArrayEquals(new byte[] { 1, 2, 3 }, audioStreamCacheWrapper.readAllBytes()); - } - verify(ttsResultMock, times(3)).read(anyInt(), anyInt()); - verify(ttsResultMock).closeAudioStreamClient(); - verifyNoMoreInteractions(ttsResultMock); - } - - /** - * Read two bytes from the cached TTSResult, then failed and get next bytes from the fallback mechanism - * - * @throws IOException - * @throws TTSException - */ - @Test - public void fallbackTest() throws IOException, TTSException { - // the TTS result will be the first stream read - // it will read two byte then fail with IOException - when(ttsResultMock.read(anyInt(), anyInt())).thenReturn(new byte[] { 1 }, new byte[] { 2 }) - .thenThrow(new IOException()); - - // this audiostream will be the fallback, the third and fourth bytes will be read from it - AudioStream mockedAudioStream = Mockito.mock(AudioStream.class); - when(mockedAudioStream.read()).thenReturn(3, 4); - AudioStreamSupplier mockedAudioStreamSupplier = Mockito.mock(AudioStreamSupplier.class); - when(mockedAudioStreamSupplier.fallBackDirectResolution()).thenReturn(mockedAudioStream); - - try (AudioStreamCacheWrapper audioStreamCacheWrapper = new AudioStreamCacheWrapper(ttsResultMock, - mockedAudioStreamSupplier)) { - assertEquals((byte) 1, audioStreamCacheWrapper.read()); - assertEquals((byte) 2, audioStreamCacheWrapper.read()); - assertEquals((byte) 3, audioStreamCacheWrapper.read()); - assertEquals((byte) 4, audioStreamCacheWrapper.read()); - - // the fallback stream has two bytes skipped, because it has been read from the TTSResult - verify(mockedAudioStream).skip(2); - } - } -} diff --git a/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/TTSLRUCacheImplTest.java b/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/TTSLRUCacheImplTest.java index f3f25b25a77..67ef7fee3f1 100644 --- a/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/TTSLRUCacheImplTest.java +++ b/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/TTSLRUCacheImplTest.java @@ -30,7 +30,6 @@ import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.api.io.TempDir; import org.mockito.Mock; -import org.mockito.Mockito; import org.mockito.junit.jupiter.MockitoExtension; import org.openhab.core.OpenHAB; import org.openhab.core.audio.AudioFormat; @@ -40,10 +39,9 @@ import org.openhab.core.test.storage.VolatileStorageService; import org.openhab.core.voice.TTSException; import org.openhab.core.voice.Voice; -import org.openhab.core.voice.internal.cache.TTSResult.AudioFormatInfo; /** - * Test the cache system + * Test the TTS cache system * * @author Gwendal Roulleau - Initial contribution */ @@ -57,8 +55,6 @@ public class TTSLRUCacheImplTest { private @Mock @NonNullByDefault({}) CachedTTSService ttsServiceMock; - private @Mock @NonNullByDefault({}) AudioStreamSupplier supplierMock; - private @Mock @NonNullByDefault({}) AudioStream audioStreamMock; private @NonNullByDefault({}) Storage storage; @@ -79,125 +75,6 @@ private TTSLRUCacheImpl createTTSCache(long size) throws IOException { return voiceLRUCache; } - /** - * Basic get and set for the 
LRU cache - * - * @throws IOException - */ - @Test - public void simpleLRUPutAndGetTest() throws IOException { - TTSLRUCacheImpl voiceLRUCache = createTTSCache(10); - TTSResult ttsResult = new TTSResult(tempDir, "key1", storage, supplierMock); - voiceLRUCache.put(ttsResult); - assertEquals(ttsResult, voiceLRUCache.ttsResultMap.get("key1")); - assertEquals(null, voiceLRUCache.ttsResultMap.get("key2")); - } - - /** - * Test the LRU eviction policy - * - * @throws IOException - */ - @Test - public void putAndGetAndEvictionOrderLRUTest() throws IOException { - TTSLRUCacheImpl voiceLRUCache = createTTSCache(10); - TTSResult ttsResult1 = new TTSResult(tempDir, "key1", storage, supplierMock); - ttsResult1.setSize(4 * 1024); - voiceLRUCache.put(ttsResult1); - TTSResult ttsResult2 = new TTSResult(tempDir, "key2", storage, supplierMock); - ttsResult2.setSize(4 * 1024); - voiceLRUCache.put(ttsResult2); - TTSResult ttsResult3 = new TTSResult(tempDir, "key3", storage, supplierMock); - ttsResult3.setSize(2 * 1024); - voiceLRUCache.put(ttsResult3); - TTSResult ttsResult4 = new TTSResult(tempDir, "key4", storage, supplierMock); - ttsResult4.setSize(4 * 1024); - voiceLRUCache.put(ttsResult4); - - // ttsResult1 should be evicted now (size limit is 10, and effective size is 12 when we try to put the - // ttsResult4) - assertEquals(null, voiceLRUCache.ttsResultMap.get("key1")); - - // getting ttsResult2 will put it in head, ttsResult3 is now tail - assertEquals(ttsResult2, voiceLRUCache.ttsResultMap.get("key2")); - - // putting again ttsResult1 should expel tail, which is ttsResult3 - voiceLRUCache.put(ttsResult1); - assertEquals(null, voiceLRUCache.ttsResultMap.get("key3")); - } - - /** - * Test the file deletion - * - * @throws IOException - */ - @Test - public void fileDeletionTest() throws IOException { - TTSResult ttsResult1 = new TTSResult(tempDir, "key1", storage, supplierMock); - File ttsResult1File = tempDir.resolve(ttsResult1.getKey() + TTSLRUCacheImpl.SOUND_EXT).toFile(); - ttsResult1File.createNewFile(); - AudioFormatInfo audioFormatInfo = new AudioFormatInfo("text", false, 1, 1, 1L, 1, "wav", "wav"); - storage.put("key1", audioFormatInfo); - - ttsResult1.deleteFiles(); - - assertFalse(ttsResult1File.exists()); - assertNull(storage.get("key1")); - } - - /** - * Test that the eviction policy calls the delete method - * - * @throws IOException - */ - @Test - public void fileDeletionWhenEvictionTest() throws IOException { - TTSLRUCacheImpl voiceLRUCache = createTTSCache(10); - - TTSResult ttsResultToEvict = Mockito.mock(TTSResult.class); - when(ttsResultToEvict.getCurrentSize()).thenReturn(15L * 1024); - when(ttsResultToEvict.getKey()).thenReturn("ttsResultToEvict"); - voiceLRUCache.put(ttsResultToEvict); - - // the cache is already full, so the next put will delete ttsResultToEvict - TTSResult ttsResult2 = new TTSResult(tempDir, "key2", storage, supplierMock); - ttsResult2.setSize(4 * 1024); - voiceLRUCache.put(ttsResult2); - - verify(ttsResultToEvict).deleteFiles(); - } - - /** - * This test checks than we can overwrite a previously - * cached entry and then it is now at the head position. 
- * - * @throws IOException - */ - @Test - public void putExistingResultLRUTest() throws IOException { - TTSLRUCacheImpl voiceLRUCache = createTTSCache(10); - TTSResult ttsResult1 = new TTSResult(tempDir, "key1", storage, supplierMock); - ttsResult1.setSize(4 * 1024); - voiceLRUCache.put(ttsResult1); - TTSResult ttsResult2 = new TTSResult(tempDir, "key2", storage, supplierMock); - ttsResult2.setSize(10 * 1024); - voiceLRUCache.put(ttsResult2); - - // put again key1 --> key2 is now tail - voiceLRUCache.put(ttsResult1); - - TTSResult ttsResult3 = new TTSResult(tempDir, "key3", storage, supplierMock); - ttsResult3.setSize(4 * 1024); - voiceLRUCache.put(ttsResult3); - - // ttsResult2 should be expelled now - assertEquals(null, voiceLRUCache.ttsResultMap.get("key2")); - - // ttsResult1 and ttsResult3 are a hit - assertEquals(ttsResult1, voiceLRUCache.ttsResultMap.get("key1")); - assertEquals(ttsResult3, voiceLRUCache.ttsResultMap.get("key3")); - } - /** * Simulate a cache miss, then two other hits * The TTS service is called only once @@ -217,23 +94,21 @@ public void getCacheMissAndHit() throws TTSException, IOException { // first cache miss AudioStream ttsResultStream = voiceLRUCache.get(ttsServiceMock, "text", voiceMock, AudioFormat.MP3); - // force supplier resolution with a "getFormat": - ttsResultStream.getFormat(); - ttsResultStream.readAllBytes(); + assertEquals(AudioFormat.MP3.getCodec(), ttsResultStream.getFormat().getCodec()); + assertArrayEquals(new byte[2], ttsResultStream.readAllBytes()); ttsResultStream.close(); // then cache hit ttsResultStream = voiceLRUCache.get(ttsServiceMock, "text", voiceMock, AudioFormat.MP3); - // force supplier resolution with a "getFormat" --> won't be called - ttsResultStream.getFormat(); - ttsResultStream.readAllBytes(); + assertEquals(AudioFormat.MP3.getCodec(), ttsResultStream.getFormat().getCodec()); + assertArrayEquals(new byte[2], ttsResultStream.readAllBytes()); ttsResultStream.close(); // then cache hit ttsResultStream = voiceLRUCache.get(ttsServiceMock, "text", voiceMock, AudioFormat.MP3); // force supplier resolution with a "getFormat" --> won't be called - ttsResultStream.getFormat(); - ttsResultStream.readAllBytes(); + assertEquals(AudioFormat.MP3.getCodec(), ttsResultStream.getFormat().getCodec()); + assertArrayEquals(new byte[2], ttsResultStream.readAllBytes()); ttsResultStream.close(); // even with three call to get and getFormat, the TTS service and the underlying stream were called @@ -250,23 +125,27 @@ public void getCacheMissAndHit() throws TTSException, IOException { * Load some TTSResults from files on disk * * @throws IOException + * @throws TTSException */ @Test - public void loadTTSResultsFromCacheDirectory() throws IOException { + public void loadTTSResultsFromCacheDirectory() throws IOException, TTSException { + // prepare cache directory - Path cacheDirectory = tempDir.resolve("cache/org.openhab.voice.tts/"); + Path cacheDirectory = tempDir.resolve("cache").resolve(TTSLRUCacheImpl.VOICE_TTS_CACHE_PID); Files.createDirectories(cacheDirectory); // prepare some files - File soundFile1 = cacheDirectory.resolve("filesound1.snd").toFile(); - storage.put("filesound1", new TTSResult.AudioFormatInfo("text", null, 42, 16, 16000L, 1, "MP3", null)); + String key1 = ttsServiceMock.getClass().getSimpleName() + "_" + "filesound1"; + File soundFile1 = cacheDirectory.resolve(key1).toFile(); + storage.put(key1, new AudioFormatInfo("text", null, 42, 16, 16000L, 1, "MP3", null)); try (FileWriter soundFile1Writer = new FileWriter(soundFile1)) { 
soundFile1Writer.write("falsedata"); } // prepare some files - File soundFile2 = cacheDirectory.resolve("filesound2.snd").toFile(); - storage.put("filesound2", new TTSResult.AudioFormatInfo("text", null, 42, 16, 16000L, 2, "MP3", null)); + String key2 = ttsServiceMock.getClass().getSimpleName() + "_" + "filesound2"; + File soundFile2 = cacheDirectory.resolve(key2).toFile(); + storage.put(key2, new AudioFormatInfo("text2", null, 42, 16, 16000L, 2, "MP3", null)); try (FileWriter soundFile2Writer = new FileWriter(soundFile2)) { soundFile2Writer.write("falsedata"); } @@ -274,89 +153,19 @@ public void loadTTSResultsFromCacheDirectory() throws IOException { // create a LRU cache that will use the above data TTSLRUCacheImpl lruCache = createTTSCache(20); - TTSResult ttsResult1 = lruCache.ttsResultMap.get("filesound1"); + // prepare fake key from tts + when(ttsServiceMock.getCacheKey("text", voiceMock, AudioFormat.WAV)).thenReturn("filesound1"); + when(ttsServiceMock.getCacheKey("text2", voiceMock, AudioFormat.WAV)).thenReturn("filesound2"); + + AudioStream ttsResult1 = lruCache.get(ttsServiceMock, "text", voiceMock, AudioFormat.WAV); assertNotNull(ttsResult1); - assertEquals(ttsResult1.getAudioFormat().getChannels(), 1); + assertEquals(1, ttsResult1.getFormat().getChannels()); - TTSResult ttsResult2 = lruCache.ttsResultMap.get("filesound2"); + AudioStream ttsResult2 = lruCache.get(ttsServiceMock, "text2", voiceMock, AudioFormat.WAV); assertNotNull(ttsResult2); - assertEquals(ttsResult2.getAudioFormat().getChannels(), 2); - - TTSResult ttsResult3 = lruCache.ttsResultMap.get("filesound3"); - assertNull(ttsResult3); - } - - @Test - public void enoughFreeDiskSpaceTest() throws IOException { - // arbitrary long value, should throw exception because no disk of - TTSLRUCacheImpl ttsCache = createTTSCache(Long.MAX_VALUE / 10); - assertFalse(ttsCache.enableCacheTTS); - } - - @Test - public void testCleanDirectoryOrphanFiles() throws IOException { - // prepare cache directory - Path cacheDirectory = tempDir.resolve("cache/org.openhab.voice.tts/"); - Files.createDirectories(cacheDirectory); - - // prepare some files : normal entry - File soundFile1 = cacheDirectory.resolve("filesound1.snd").toFile(); - storage.put("filesound1", new TTSResult.AudioFormatInfo("text", null, 42, 16, 16000L, 1, "MP3", null)); - try (FileWriter soundFile1Writer = new FileWriter(soundFile1)) { - soundFile1Writer.write("falsedata"); - } - - // prepare some files : orphan info - storage.put("filesound2", new TTSResult.AudioFormatInfo("text", null, 42, 16, 16000L, 1, "MP3", null)); - - // prepare some files : orphan sound file - File soundFile3 = cacheDirectory.resolve("filesound3.snd").toFile(); - try (FileWriter soundFile3Writer = new FileWriter(soundFile3)) { - soundFile3Writer.write("fake non empty data"); - } - - // create a LRU cache that will use the above data - createTTSCache(20); - - // the file for entry 1 still exists : - assertTrue(soundFile1.exists()); - assertNotNull(storage.get("filesound1")); - - // the file for entry 2 should have been deleted : - assertNull(storage.get("filesound2")); - - // the file for entry 3 should have been deleted : - assertFalse(soundFile3.exists()); - } - - @Test - public void testCleanDirectoryEmptyFiles() throws IOException { - // prepare cache directory - Path cacheDirectory = tempDir.resolve("cache/org.openhab.voice.tts/"); - Files.createDirectories(cacheDirectory); - - // prepare some files : normal entry - File soundFile1 = cacheDirectory.resolve("filesound1.snd").toFile(); - 
storage.put("filesound1", new TTSResult.AudioFormatInfo("text", null, 42, 16, 16000L, 1, "MP3", null)); - try (FileWriter soundFile1Writer = new FileWriter(soundFile1)) { - soundFile1Writer.write("falsedata"); - } - - // prepare some files : empty file - File soundFile2 = cacheDirectory.resolve("filesound2.snd").toFile(); - soundFile2.createNewFile(); - storage.put("filesound2", new TTSResult.AudioFormatInfo("text", null, 42, 16, 16000L, 1, "MP3", null)); - assertTrue(soundFile2.exists()); - - // create a LRU cache that will load the above data - createTTSCache(20); - - // the file for entry 1 still exists : - assertTrue(soundFile1.exists()); - assertNotNull(storage.get("filesound1")); + assertEquals(2, ttsResult2.getFormat().getChannels()); - // the file for entry 2 should have been deleted : - assertNull(storage.get("filesound2")); - assertFalse(soundFile2.exists()); + // The tts service wasn't called because all data was found in cache : + verifyNoMoreInteractions(ttsServiceMock); } } diff --git a/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/TTSResultTest.java b/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/TTSResultTest.java deleted file mode 100644 index 3e1ea5bce4d..00000000000 --- a/bundles/org.openhab.core.voice/src/test/java/org/openhab/core/voice/internal/cache/TTSResultTest.java +++ /dev/null @@ -1,330 +0,0 @@ -/** - * Copyright (c) 2010-2023 Contributors to the openHAB project - * - * See the NOTICE file(s) distributed with this work for additional - * information. - * - * This program and the accompanying materials are made available under the - * terms of the Eclipse Public License 2.0 which is available at - * http://www.eclipse.org/legal/epl-2.0 - * - * SPDX-License-Identifier: EPL-2.0 - */ -package org.openhab.core.voice.internal.cache; - -import static org.junit.jupiter.api.Assertions.*; -import static org.mockito.Mockito.*; - -import java.io.ByteArrayInputStream; -import java.io.File; -import java.io.FileWriter; -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.nio.file.Path; -import java.util.Arrays; -import java.util.List; -import java.util.Random; -import java.util.stream.Collectors; - -import org.eclipse.jdt.annotation.NonNullByDefault; -import org.junit.jupiter.api.BeforeEach; -import org.junit.jupiter.api.Test; -import org.junit.jupiter.api.extension.ExtendWith; -import org.junit.jupiter.api.io.TempDir; -import org.mockito.Mock; -import org.mockito.junit.jupiter.MockitoExtension; -import org.openhab.core.OpenHAB; -import org.openhab.core.audio.AudioFormat; -import org.openhab.core.audio.AudioStream; -import org.openhab.core.storage.Storage; -import org.openhab.core.test.storage.VolatileStorageService; -import org.openhab.core.voice.TTSException; -import org.openhab.core.voice.Voice; -import org.openhab.core.voice.internal.cache.TTSResult.AudioFormatInfo; - -/** - * Test the cache system - * - * @author Gwendal Roulleau - Initial contribution - */ -@ExtendWith(MockitoExtension.class) -@NonNullByDefault -public class TTSResultTest { - - private @TempDir @NonNullByDefault({}) Path tempDir; - - private @Mock @NonNullByDefault({}) CachedTTSService ttsServiceMock; - - private @Mock @NonNullByDefault({}) Voice voiceMock; - - private @NonNullByDefault({}) Storage storage; - - @BeforeEach - public void init() { - System.setProperty(OpenHAB.USERDATA_DIR_PROG_ARGUMENT, tempDir.toString()); - this.storage = new 
VolatileStorageService().getStorage(TTSLRUCacheImpl.VOICE_TTS_CACHE_PID); - } - - public static class FakeAudioStream extends AudioStream { - - ByteArrayInputStream innerInputStream; - private boolean closed = false; - - public FakeAudioStream(byte[] byteToReturn) { - innerInputStream = new ByteArrayInputStream(byteToReturn); - } - - @Override - public AudioFormat getFormat() { - return AudioFormat.MP3; - } - - @Override - public int read() throws IOException { - return innerInputStream.read(); - } - - @Override - public void close() throws IOException { - this.closed = true; - innerInputStream.close(); - } - - public boolean isClosed() { - return closed; - } - } - - /** - * Get twice the AudioStream with only one call. - * - * @throws TTSException - * @throws IOException - */ - @Test - public void getAudioStreamTwiceWithOnlyOneCallToTheTTSAndCompare() throws TTSException, IOException { - AudioStream fakeAudioStream = new FakeAudioStream("This is a false string to simulate some data".getBytes()); - AudioStreamSupplier supplier = new AudioStreamSupplier(ttsServiceMock, "text", voiceMock, AudioFormat.MP3); - when(ttsServiceMock.synthesizeForCache("text", voiceMock, AudioFormat.MP3)).thenReturn(fakeAudioStream); - - // broken fallback supplier to ensure that the service will exclusively use the main cache : - AudioStreamSupplier fallbackSupplierBroken = new AudioStreamSupplier(ttsServiceMock, "WRONG DATA", voiceMock, - AudioFormat.MP3); - - TTSResult ttsResult = new TTSResult(tempDir, "key1", storage, supplier); - - // get audiostream wrapped by the cache system - AudioStream actualAudioStream = ttsResult.getAudioStream(fallbackSupplierBroken); - String actuallyRead = new String(actualAudioStream.readAllBytes(), StandardCharsets.UTF_8); - actualAudioStream.close(); - // ensure that the data are not corrupted - assertEquals("This is a false string to simulate some data", actuallyRead); - - // get again the audiostream wrapped by the cache system - actualAudioStream = ttsResult.getAudioStream(fallbackSupplierBroken); - actuallyRead = new String(actualAudioStream.readAllBytes(), StandardCharsets.UTF_8); - actualAudioStream.close(); - // ensure that the data are not corrupted - assertEquals("This is a false string to simulate some data", actuallyRead); - - // Ensure the TTS service was called only once : - verify(ttsServiceMock, times(1)).synthesizeForCache("text", voiceMock, AudioFormat.MP3); - } - - /** - * Load some TTSResults from files on disk - * - * @throws IOException - */ - @Test - public void loadTTSResultFromFile() throws IOException { - // broken fallback supplier to ensure that the service will exclusively use the main cache : - AudioStreamSupplier fallbackSupplierBroken = new AudioStreamSupplier(ttsServiceMock, "WRONG DATA", voiceMock, - AudioFormat.MP3); - - // prepare an info file - TTSResult.AudioFormatInfo audioFormatInfoFile = new TTSResult.AudioFormatInfo("text", null, 42, 16, 16000L, 1, - "MP3", null); - storage.put("filesound1", audioFormatInfoFile); - - // prepare the related sound file - File soundFile1Snd = tempDir.resolve("filesound1.snd").toFile(); - try (FileWriter fileWriterSnd = new FileWriter(soundFile1Snd)) { - fileWriterSnd.write("Fake data"); - } - - // Build the TTSResult that will load the info file - TTSResult ttsResultBuildByFile = new TTSResult(tempDir, "filesound1", storage); - - assertEquals("text", ttsResultBuildByFile.getText()); - - // Test the fake AudioStream as string data - AudioStream audioStreamClient = 
ttsResultBuildByFile.getAudioStream(fallbackSupplierBroken); - String readFromFile = new String(audioStreamClient.readAllBytes()); - audioStreamClient.close(); - assertEquals("Fake data", readFromFile); - - // test audio format - assertEquals(42, audioStreamClient.getFormat().getBitDepth()); - assertEquals(16, audioStreamClient.getFormat().getBitRate()); - assertEquals(16000, audioStreamClient.getFormat().getFrequency()); - assertEquals("MP3", audioStreamClient.getFormat().getCodec()); - assertEquals(1, audioStreamClient.getFormat().getChannels()); - assertEquals(null, audioStreamClient.getFormat().getContainer()); - } - - /** - * Test that the service can handle several calls concurrently and get the TTS result only once - * - * @throws TTSException - * @throws IOException - */ - @Test - public void loadTwoStreamsAtTheSameTimeFromTheSameTTS() throws TTSException, IOException { - // broken fallback supplier to ensure that the service will exclusively use the main cache : - AudioStreamSupplier fallbackSupplierBroken = new AudioStreamSupplier(ttsServiceMock, "WRONG DATA", voiceMock, - AudioFormat.MP3); - - // init simulated data stream - FakeAudioStream fakeAudioStream = new FakeAudioStream(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }); - AudioStreamSupplier supplier = new AudioStreamSupplier(ttsServiceMock, "text", voiceMock, AudioFormat.MP3); - when(ttsServiceMock.synthesizeForCache("text", voiceMock, AudioFormat.MP3)).thenReturn(fakeAudioStream); - - TTSResult ttsResult = new TTSResult(tempDir, "key1", storage, supplier); - - // get a first audiostream wrapped by the cache system - AudioStream actualAudioStream1 = ttsResult.getAudioStream(fallbackSupplierBroken); - - // get a second, concurrent, audiostream wrapped by the cache system - AudioStream actualAudioStream2 = ttsResult.getAudioStream(fallbackSupplierBroken); - - // read bytes from the two stream concurrently - byte[] byteReadFromStream1 = actualAudioStream1.readNBytes(4); - byte[] byteReadFromStream2 = actualAudioStream2.readNBytes(4); - - assertArrayEquals(new byte[] { 1, 2, 3, 4 }, byteReadFromStream1); - assertArrayEquals(new byte[] { 1, 2, 3, 4 }, byteReadFromStream2); - - // second read, from the two stream concurrently - byteReadFromStream1 = actualAudioStream1.readNBytes(4); - byteReadFromStream2 = actualAudioStream2.readNBytes(4); - - actualAudioStream1.close(); - assertFalse(fakeAudioStream.isClosed()); // not closed because there is still one open - actualAudioStream2.close(); - assertTrue(fakeAudioStream.isClosed()); // all client closed, the main stream should also be closed - - assertArrayEquals(new byte[] { 5, 6, 7, 8 }, byteReadFromStream1); - assertArrayEquals(new byte[] { 5, 6, 7, 8 }, byteReadFromStream1); - - // we call the TTS service only once - verify(ttsServiceMock, times(1)).synthesizeForCache("text", voiceMock, AudioFormat.MP3); - verifyNoMoreInteractions(ttsServiceMock); - } - - /** - * Test that the service can handle several calls concurrently in two threads and get the TTS result only once - * - * @throws TTSException - * @throws IOException - */ - @Test - public void loadTwoThreadsAtTheSameTimeFromTheSameTTS() throws TTSException, IOException { - // broken fallback supplier to ensure that the service will exclusively use the main cache : - AudioStreamSupplier fallbackSupplierBroken = new AudioStreamSupplier(ttsServiceMock, "WRONG DATA", voiceMock, - AudioFormat.MP3); - - // init simulated data stream - byte[] randomData = getRandomData(10 * 10240); - FakeAudioStream fakeAudioStream = new 
FakeAudioStream(randomData); - AudioStreamSupplier supplier = new AudioStreamSupplier(ttsServiceMock, "text", voiceMock, AudioFormat.MP3); - when(ttsServiceMock.synthesizeForCache("text", voiceMock, AudioFormat.MP3)).thenReturn(fakeAudioStream); - - TTSResult ttsResult = new TTSResult(tempDir, "key1", storage, supplier); - - // get a first audiostream wrapped by the cache system - AudioStream actualAudioStream1 = ttsResult.getAudioStream(fallbackSupplierBroken); - - // get a second, concurrent, audiostream wrapped by the cache system - AudioStream actualAudioStream2 = ttsResult.getAudioStream(fallbackSupplierBroken); - - // read bytes from the two stream concurrently - List parallelAudioStreamList = Arrays.asList(actualAudioStream1, actualAudioStream2); - List bytesResultList = parallelAudioStreamList.parallelStream().map(stream -> readSafe(stream)) - .collect(Collectors.toList()); - - assertArrayEquals(randomData, bytesResultList.get(0)); - assertArrayEquals(randomData, bytesResultList.get(1)); - - actualAudioStream1.close(); - assertFalse(fakeAudioStream.isClosed()); // not closed because there is still one open - actualAudioStream2.close(); - assertTrue(fakeAudioStream.isClosed()); // all client closed, the main stream should also be closed - - // we call the TTS service only once - verify(ttsServiceMock, times(1)).synthesizeForCache("text", voiceMock, AudioFormat.MP3); - verifyNoMoreInteractions(ttsServiceMock); - } - - private byte[] readSafe(AudioStream audioStream) { - try { - return audioStream.readAllBytes(); - } catch (IOException e) { - return new byte[0]; - } - } - - private byte[] getRandomData(int length) { - Random random = new Random(); - byte[] randomBytes = new byte[length]; - random.nextBytes(randomBytes); - return randomBytes; - } - - @Test - public void testStreamWithSeveralChunks() throws TTSException, IOException { - // broken fallback supplier to ensure that the service will exclusively use the main cache : - AudioStreamSupplier fallbackSupplierBroken = new AudioStreamSupplier(ttsServiceMock, "WRONG DATA", voiceMock, - AudioFormat.MP3); - - // init simulated data stream (two chunks of 10k) - byte[] randomData = getRandomData(2 * 10240); - FakeAudioStream fakeAudioStream = new FakeAudioStream(randomData); - AudioStreamSupplier supplier = new AudioStreamSupplier(ttsServiceMock, "text", voiceMock, AudioFormat.MP3); - when(ttsServiceMock.synthesizeForCache("text", voiceMock, AudioFormat.MP3)).thenReturn(fakeAudioStream); - - TTSResult ttsResult = new TTSResult(tempDir, "key1", storage, supplier); - - AudioStream audioStreamClient = ttsResult.getAudioStream(fallbackSupplierBroken); - byte[] bytesRead = audioStreamClient.readAllBytes(); - audioStreamClient.close(); - assertTrue(fakeAudioStream.isClosed()); - - assertArrayEquals(randomData, bytesRead); - } - - /** - * Get the total length of the stream by forcing it to read everything - * - * @throws TTSException - * @throws IOException - */ - @Test - public void testGetTotalSize() throws TTSException, IOException { - // broken fallback supplier to ensure that the service will exclusively use the main cache : - AudioStreamSupplier fallbackSupplierBroken = new AudioStreamSupplier(ttsServiceMock, "WRONG DATA", voiceMock, - AudioFormat.MP3); - - // init simulated data stream (two chunks of 10k) - byte[] randomData = getRandomData(2 * 10240); - AudioStream fakeAudioStream = new FakeAudioStream(randomData); - AudioStreamSupplier supplier = new AudioStreamSupplier(ttsServiceMock, "text", voiceMock, AudioFormat.MP3); - 
when(ttsServiceMock.synthesizeForCache("text", voiceMock, AudioFormat.MP3)).thenReturn(fakeAudioStream); - - TTSResult ttsResult = new TTSResult(tempDir, "key1", storage, supplier); - AudioStream audioStreamClient = ttsResult.getAudioStream(fallbackSupplierBroken); - Long totalSize = ttsResult.getTotalSize(); - audioStreamClient.close(); - assertEquals(2 * 10240, totalSize); - } -} diff --git a/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/DataRetrievalException.java b/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/DataRetrievalException.java new file mode 100644 index 00000000000..22f5f3ecf8a --- /dev/null +++ b/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/DataRetrievalException.java @@ -0,0 +1,30 @@ +/** + * Copyright (c) 2010-2023 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.core.cache.lru; + +import org.eclipse.jdt.annotation.NonNullByDefault; + +/** + * Custom exception class, launched when we cannot retrieve data from the cache entry supplier + * + * @author Gwendal Roulleau - Initial Contribution + * + */ +@NonNullByDefault +public class DataRetrievalException extends RuntimeException { + private static final long serialVersionUID = 5716770444437349803L; + + public DataRetrievalException(String errorMessage, Throwable cause) { + super(errorMessage, cause); + } +} diff --git a/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/InputStreamCacheWrapper.java b/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/InputStreamCacheWrapper.java new file mode 100644 index 00000000000..1797e028db3 --- /dev/null +++ b/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/InputStreamCacheWrapper.java @@ -0,0 +1,116 @@ +/** + * Copyright (c) 2010-2023 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.core.cache.lru; + +import java.io.IOException; +import java.io.InputStream; +import java.util.Objects; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Each cache result instance can handle several {@link InputStream}s. + * This class is a wrapper for such functionality and can + * ask the cached entry for data, allowing concurrent access to + * the source even if it is currently actively read from the supplier service. + * This class implements the two main read methods (byte by byte, and with an array) + * + * @author Gwendal Roulleau - Initial contribution + */ +@NonNullByDefault +public class InputStreamCacheWrapper extends InputStream { + + private final Logger logger = LoggerFactory.getLogger(InputStreamCacheWrapper.class); + + private LRUMediaCacheEntry cacheEntry; + private int offset = 0; + + /*** + * Construct a transparent InputStream wrapper around data from the cache. 
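For illustration of the contract described in the class comments above: a supplier handed to the cache is expected to wrap any low-level failure in the unchecked DataRetrievalException introduced in this patch, so the cache can simply let it propagate to its caller. A minimal sketch only; fetchMedia() and MyMetadata are hypothetical names used for this example and are not part of the patch:

    Supplier<LRUMediaCacheEntry<MyMetadata>> supplier = () -> {
        try {
            InputStream stream = fetchMedia(); // hypothetical method producing the media stream
            return new LRUMediaCacheEntry<>("unique-key", stream, new MyMetadata());
        } catch (IOException e) {
            // propagated unchanged to the caller of LRUMediaCache.get()
            throw new DataRetrievalException("Cannot retrieve media data", e);
        }
    };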
+ * + * @param cacheEntry The parent cached {@link LRUMediaCacheEntry} + */ + public InputStreamCacheWrapper(LRUMediaCacheEntry cacheEntry) { + this.cacheEntry = cacheEntry; + } + + @Override + public int available() throws IOException { + return cacheEntry.availableFrom(offset); + } + + @Override + public int read() throws IOException { + byte[] bytesRead = cacheEntry.read(offset, 1); + if (bytesRead.length == 0) { + return -1; + } else { + offset++; + return bytesRead[0] & 0xff; + } + } + + @Override + public int read(byte @Nullable [] b, int off, int len) throws IOException { + if (b == null) { + throw new IOException("Array to write is null"); + } + Objects.checkFromIndexSize(off, len, b.length); + + if (len == 0) { + return 0; + } + + byte[] bytesRead = cacheEntry.read(offset, len); + offset += bytesRead.length; + if (bytesRead.length == 0) { + return -1; + } + int i = 0; + for (; i < len && i < bytesRead.length; i++) { + b[off + i] = bytesRead[i]; + } + return i; + } + + @Override + public long skip(long n) throws IOException { + offset += n; + return n; + } + + @Override + public void close() throws IOException { + try { + super.close(); + } finally { + cacheEntry.closeStreamClient(); + } + } + + public long length() { + Long totalSize = cacheEntry.getTotalSize(); + if (totalSize > 0L) { + return totalSize; + } + logger.debug("Cannot get the length of the stream"); + return -1; + } + + public InputStream getClonedStream() throws IOException { + return cacheEntry.getInputStream(); + } +} diff --git a/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/LRUMediaCache.java b/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/LRUMediaCache.java new file mode 100644 index 00000000000..f21ed06c27f --- /dev/null +++ b/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/LRUMediaCache.java @@ -0,0 +1,248 @@ +/** + * Copyright (c) 2010-2023 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.core.cache.lru; + +import java.io.File; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.file.FileStore; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.Collections; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.openhab.core.OpenHAB; +import org.openhab.core.storage.Storage; +import org.openhab.core.storage.StorageService; +import org.osgi.service.component.annotations.Reference; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Cache system for media files, and their metadata + * This is a LRU cache (least recently used entry is evicted if the size + * is exceeded). 
+ * Size is based on the size on disk (in bytes) + * + * @author Gwendal Roulleau - Initial contribution + */ +@NonNullByDefault +public class LRUMediaCache { + + private final Logger logger = LoggerFactory.getLogger(LRUMediaCache.class); + + private static final String CACHE_FOLDER_NAME = "cache"; + + final Map> cachedResults; + + /** + * Lock to handle concurrent access to the same entry + */ + private final Map lockByEntry = new ConcurrentHashMap<>(); + + /** + * The size limit, in bytes. The size is not a hard one, because the final size of the + * current request is not known and may or may not exceed the limit. + */ + protected final long maxCacheSize; + + private final Path cacheFolder; + + /** + * Store for additional informations along the media file + */ + private Storage storage; + + protected boolean cacheIsOK = true; + + /** + * Constructs a cache system. + * + * @param storageService Storage service to store metadata + * @param cacheSize Limit size, in byte + * @param pid A pid identifying the cache on disk + */ + public LRUMediaCache(@Reference StorageService storageService, long maxCacheSize, String pid, + @Nullable ClassLoader clazzLoader) { + this.storage = storageService.getStorage(pid, clazzLoader); + this.cachedResults = Collections.synchronizedMap(new LinkedHashMap<>(20, .75f, true)); + this.cacheFolder = Path.of(OpenHAB.getUserDataFolder(), CACHE_FOLDER_NAME, pid); + this.maxCacheSize = maxCacheSize; + + // creating directory if needed : + logger.debug("Creating cache folder '{}'", cacheFolder); + try { + Files.createDirectories(cacheFolder); + cleanCacheDirectory(); + loadAll(); + } catch (IOException e) { + this.cacheIsOK = false; + logger.warn("Cannot use cache directory", e); + } + + // check if we have enough space : + if (getFreeSpace() < (maxCacheSize * 2)) { + cacheIsOK = false; + logger.warn("Not enough space for the cache"); + } + logger.debug("Using cache folder '{}'", cacheFolder); + } + + private void cleanCacheDirectory() throws IOException { + try { + List<@Nullable Path> filesInCacheFolder = Files.list(cacheFolder).collect(Collectors.toList()); + + // 1 delete empty files + Iterator<@Nullable Path> fileDeleterIterator = filesInCacheFolder.iterator(); + while (fileDeleterIterator.hasNext()) { + Path path = fileDeleterIterator.next(); + if (path != null) { + File file = path.toFile(); + if (file.length() == 0) { + file.delete(); + String fileName = path.getFileName().toString(); + storage.remove(fileName); + fileDeleterIterator.remove(); + } + } + } + + // 2 clean orphan (part of a pair (file + metadata) without a corresponding partner) + // 2-a delete a file without its metadata + for (Path path : filesInCacheFolder) { + if (path != null) { + String fileName = path.getFileName().toString(); + // check corresponding metadata in storage + V metadata = storage.get(fileName); + if (metadata == null) { + Files.delete(path); + } + } + } + // 2-b delete metadata without corresponding file + for (Entry entry : storage.stream().toList()) { + Path correspondingFile = cacheFolder.resolve(entry.getKey()); + if (!Files.exists(correspondingFile)) { + storage.remove(entry.getKey()); + } + } + } catch (IOException e) { + logger.warn("Cannot load the cache directory : {}", e.getMessage()); + cacheIsOK = false; + return; + } + } + + private long getFreeSpace() { + try { + Path rootPath = Paths.get(new URI("file:///")); + Path dirPath = rootPath.resolve(cacheFolder.getParent()); + FileStore dirFileStore = Files.getFileStore(dirPath); + return 
dirFileStore.getUsableSpace(); + } catch (URISyntaxException | IOException e) { + logger.error("Cannot compute free disk space for the cache. Reason: {}", e.getMessage()); + return 0; + } + } + + /** + * Returns a {@link LRUMediaCacheEntry} from the cache, or if not already in the cache : + * resolve it, stores it, and returns it. + * key A unique key identifying the result + * supplier the data and metadata supplier. It is OK to launch a DataRetrievalException from this, as it will be + * rethrown. + */ + public LRUMediaCacheEntry get(String key, Supplier> supplier) + throws DataRetrievalException { + if (!cacheIsOK) { + return supplier.get(); + } + + // we use a lock with fine granularity, by key, to not lock the entire cache + // when resolving the supplier (which could be time consuming) + Lock lockForCurrentEntry = lockByEntry.computeIfAbsent(key, k -> new ReentrantLock()); + if (lockForCurrentEntry == null) { + cacheIsOK = false; + logger.error("Cannot compute lock within cache system. Shouldn't happen"); + return supplier.get(); + } + lockForCurrentEntry.lock(); + try { + // try to get from cache + LRUMediaCacheEntry result = cachedResults.get(key); + if (result != null && result.isFaulty()) { // if previously marked as faulty + result.deleteFile(); + cachedResults.remove(key); + result = null; + } + if (result == null) { // it's a cache miss or a faulty result, we must (re)create it + logger.debug("Cache miss {}", key); + result = supplier.get(); + put(result); + } + return result; + } finally { + lockForCurrentEntry.unlock(); + } + } + + protected void put(LRUMediaCacheEntry result) { + result.setCacheContext(cacheFolder, storage); + cachedResults.put(result.getKey(), result); + makeSpace(); + } + + /** + * Load all {@link LRUMediaCacheEntry} cached to the disk. + */ + private void loadAll() throws IOException { + cachedResults.clear(); + storage.stream().map(entry -> new LRUMediaCacheEntry(entry.getKey())).forEach(this::put); + } + + /** + * Check if the cache is not already full and make space if needed. + * We don't use the removeEldestEntry test method from the linkedHashMap because it can only remove one element. + */ + protected void makeSpace() { + synchronized (cachedResults) { + Iterator<@Nullable LRUMediaCacheEntry> iterator = cachedResults.values().iterator(); + Long currentCacheSize = cachedResults.values().stream() + .map(result -> result == null ? 0 : result.getCurrentSize()).reduce(0L, (Long::sum)); + int attemptToDelete = 0; + while (currentCacheSize > maxCacheSize && cachedResults.size() > 1 && attemptToDelete < 10) { + attemptToDelete++; + LRUMediaCacheEntry oldestEntry = iterator.next(); + if (oldestEntry != null) { + oldestEntry.deleteFile(); + currentCacheSize -= oldestEntry.getCurrentSize(); + lockByEntry.remove(oldestEntry.getKey()); + } + iterator.remove(); + } + } + } +} diff --git a/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/LRUMediaCacheEntry.java b/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/LRUMediaCacheEntry.java new file mode 100644 index 00000000000..6d8a0bb329a --- /dev/null +++ b/bundles/org.openhab.core/src/main/java/org/openhab/core/cache/lru/LRUMediaCacheEntry.java @@ -0,0 +1,351 @@ +/** + * Copyright (c) 2010-2023 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
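Putting the pieces of LRUMediaCache above together, a caller would use it roughly as sketched below. This is only an illustration: the pid, cache size, key and MyMetadata type are made-up values, and the StorageService is assumed to be available (for example injected via OSGi); only the LRUMediaCache and LRUMediaCacheEntry calls themselves come from this patch.

    import java.io.IOException;
    import java.io.InputStream;

    import org.openhab.core.cache.lru.LRUMediaCache;
    import org.openhab.core.cache.lru.LRUMediaCacheEntry;
    import org.openhab.core.storage.StorageService;

    class MediaCacheUsageSketch {

        static class MyMetadata {
            // any serializable metadata to keep alongside the cached file
        }

        void example(StorageService storageService, InputStream freshData) throws IOException {
            // 50 MB cache, persisted under <userdata>/cache/org.example.mediacache
            LRUMediaCache<MyMetadata> cache = new LRUMediaCache<>(storageService, 50 * 1024 * 1024,
                    "org.example.mediacache", getClass().getClassLoader());

            // cache miss: the supplier is resolved once; cache hit: the entry is served from disk
            LRUMediaCacheEntry<MyMetadata> entry = cache.get("unique-key",
                    () -> new LRUMediaCacheEntry<>("unique-key", freshData, new MyMetadata()));

            try (InputStream in = entry.getInputStream()) {
                byte[] data = in.readAllBytes(); // reading also fills the on-disk cache file
            }
        }
    }

Because eviction is based on the size of the files on disk, the supplier's stream does not need to announce its length up front.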
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.core.cache.lru; + +import java.io.File; +import java.io.IOException; +import java.io.InputStream; +import java.nio.ByteBuffer; +import java.nio.channels.FileChannel; +import java.nio.file.Path; +import java.nio.file.StandardOpenOption; +import java.util.EnumSet; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.openhab.core.storage.Storage; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * A cached media entry resulting from a call to a supplier or a load from disk + * This class also adds the capability to serve multiple InputStream concurrently + * without asking already retrieved data to the wrapped stream. + * + * @author Gwendal Roulleau - Initial contribution + */ +@NonNullByDefault +public class LRUMediaCacheEntry { + + private final Logger logger = LoggerFactory.getLogger(LRUMediaCacheEntry.class); + + /** + * Arbitrary chunk size. Small is less latency but more small calls and CPU load. + */ + private static final int CHUNK_SIZE = 10000; + + /** + * Take count of the number of {@link InputStreamCacheWrapper} currently using this {@link LRUMediaCacheEntry} + */ + private int countStreamClient = 0; + + /** + * A unique key to identify the result + * (used to build the filename) + */ + private final String key; + + // The inner InputStream + private @Nullable InputStream inputStream; + private @Nullable V metadata; + + // The data file where the media is stored: + private @Nullable File file; + // optional metadata is stored here: + private @Nullable Storage storage; + + protected long currentSize = 0; + private boolean completed; + private boolean faulty = false; + + private @Nullable FileChannel fileChannel; + private final Lock fileOperationLock = new ReentrantLock(); + + /** + * This constructor is used when the file is fully cached on disk. + * The file on disk will provide the data, and the storage will + * provide metadata. + * + * @param key A unique key to identify the produced data + */ + public LRUMediaCacheEntry(String key) { + this.key = key; + this.completed = true; + } + + /** + * This constructor is used when the file is not yet cached on disk. + * Data is provided by the arguments + * + * @param key A unique key to identify the produced data + * @param inputStream The data stream + * @param metadata optional metadata to store along the stream + */ + public LRUMediaCacheEntry(String key, InputStream inputStream, @Nullable V metadata) { + this.key = key; + this.inputStream = inputStream; + this.metadata = metadata; + this.completed = false; + } + + /** + * Link this cache entry to the underlying storage facility (disk for data, storage service for metadata) + * + * @param cacheDirectory + * @param storage + */ + protected void setCacheContext(Path cacheDirectory, Storage storage) { + File fileLocal = cacheDirectory.resolve(key).toFile(); + this.file = fileLocal; + this.storage = storage; + V actualDataInStorage = storage.get(key); + if (actualDataInStorage == null) { + storage.put(key, metadata); + } else { + this.metadata = actualDataInStorage; + } + this.currentSize = fileLocal.length(); + } + + /** + * Get total size of the underlying stream. 
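As a quick illustration of the two constructors above (MyMetadata, sourceStream and the keys are placeholders): the key-only form is used when the cache reloads an entry whose file already exists on disk, while the three-argument form wraps data that still has to be pulled from the supplier's stream. In both cases the owning LRUMediaCache attaches the entry to the cache folder and the metadata storage via setCacheContext() when the entry is put into the cache.

    // entry reloaded from a previous run; the data is already in <cacheFolder>/<key>
    LRUMediaCacheEntry<MyMetadata> fromDisk = new LRUMediaCacheEntry<>("already-cached-key");

    // fresh entry; the stream is written to the cache file lazily while clients read it
    LRUMediaCacheEntry<MyMetadata> fresh =
            new LRUMediaCacheEntry<>("new-key", sourceStream, new MyMetadata());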
+ * If not already completed, will query the stream inside, + * or get all the data. + * + * @return + */ + protected Long getTotalSize() { + if (completed) { // we already know the total size of the sound + return currentSize; + } else { + // we must force-read all the stream to get the real size + try { + read(0, Integer.MAX_VALUE); + } catch (IOException e) { + logger.debug("Cannot read the total size of the cache result. Using 0", e); + } + return currentSize; + } + } + + /** + * Get the current size + * + * @return + */ + protected long getCurrentSize() { + return currentSize; + } + + protected String getKey() { + return key; + } + + /** + * Open an InputStream wrapped around the file + * There could be several clients InputStream on the same cache result + * + * @return A new InputStream with data from the cache + * @throws IOException + */ + public InputStream getInputStream() throws IOException { + + File localFile = file; + if (localFile == null) { // the cache entry is not tied to the disk. The cache is not ready or not to be used. + InputStream inputStreamLocal = inputStream; + if (inputStreamLocal != null) { + return inputStreamLocal; + } else { + throw new IllegalStateException( + "Shouldn't happen. This cache entry is not tied to a file on disk and the inner input stream is null."); + } + } + logger.debug("Trying to open a cache inputstream for {}", localFile.getName()); + + fileOperationLock.lock(); + try { + countStreamClient++; + // we could have to open the fileChannel + FileChannel fileChannelLocal = fileChannel; + if (fileChannelLocal == null || !localFile.exists()) { + fileChannelLocal = FileChannel.open(localFile.toPath(), + EnumSet.of(StandardOpenOption.CREATE, StandardOpenOption.READ, StandardOpenOption.WRITE)); + fileChannel = fileChannelLocal; + // if the file size is 0 but the completed boolean is true, THEN it means the file have + // been deleted. We must mark the file as to be recreated : + if (completed && fileChannelLocal.size() == 0) { + logger.debug("The cached file {} is not present anymore. We will have to recreate it", + localFile.getName()); + this.faulty = true; + } + } + } finally { + fileOperationLock.unlock(); + } + return new InputStreamCacheWrapper(this); + } + + /** + * This method is called by a wrapper when it has been closed by a client + * The file and the inner stream could then be closed, if and only if no other client are accessing it. + * + * @throws IOException + */ + protected void closeStreamClient() throws IOException { + File fileLocal = file; + if (fileLocal == null) { + logger.debug("Trying to close a non existent-file. Is there a bug"); + return; + } + logger.debug("Trying to close a cached inputstream client for {}", fileLocal.getName()); + fileOperationLock.lock(); + try { + countStreamClient--; + if (countStreamClient <= 0) {// no more client reading or writing : closing the filechannel + try { + FileChannel fileChannelLocal = fileChannel; + if (fileChannelLocal != null) { + try { + logger.debug("Effectively close the cache filechannel for {}", fileLocal.getName()); + fileChannelLocal.close(); + } finally { + fileChannel = null; + } + } + } finally { + InputStream inputStreamLocal = inputStream; + if (inputStreamLocal != null) { + inputStreamLocal.close(); + } + } + } + } finally { + fileOperationLock.unlock(); + } + } + + /** + * Get metadata for this cache result. + * + * @return metadata + */ + public @Nullable V getMetadata() { + return this.metadata; + } + + /** + * Read from the cached file. 
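The client counting in getInputStream() and closeStreamClient() above means that one cache entry can serve several readers at the same time. A small sketch, assuming entry is an LRUMediaCacheEntry already attached to a cache:

    InputStream first = entry.getInputStream();   // first client, the file channel is opened
    InputStream second = entry.getInputStream();  // second client shares the same file channel

    byte[] a = first.readNBytes(4);               // both clients read independently;
    byte[] b = second.readNBytes(4);              // already fetched bytes come from the cache file

    first.close();                                // nothing is closed yet, one client remains
    second.close();                               // last client: file channel and source stream are closed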
If there is not enough bytes to read in the file, the supplier will be queried. + * + * @param start The offset to read the file from + * @param sizeToRead the number of byte to read + * @return A byte array from the file. The size may or may not be the sizeToRead requested + * @throws IOException + */ + protected byte[] read(int start, int sizeToRead) throws IOException { + FileChannel fileChannelLocal = fileChannel; + if (fileChannelLocal == null || isFaulty()) { + throw new IOException("Cannot read cache from null file channel or deleted file."); + } + + // check if we need to get data from the inner stream. + if (start + sizeToRead > fileChannelLocal.size() && !completed) { + logger.trace("Maybe need to get data from inner stream"); + // try to get new bytes from the inner stream + InputStream streamLocal = inputStream; + if (streamLocal != null) { + logger.trace("Trying to synchronize for reading inner inputstream"); + synchronized (streamLocal) { + // now that we really have the lock, test again if we really need data from the stream + while (start + sizeToRead > fileChannelLocal.size() && !completed) { + logger.trace("Really need to get data from inner stream"); + byte[] readFromSupplierStream = streamLocal.readNBytes(CHUNK_SIZE); + if (readFromSupplierStream.length == 0) { // we read all the stream + logger.trace("End of the stream reached"); + completed = true; + } else { + fileChannelLocal.write(ByteBuffer.wrap(readFromSupplierStream), currentSize); + logger.trace("writing {} bytes to {}", readFromSupplierStream.length, key); + currentSize += readFromSupplierStream.length; + } + } + } + } else { + faulty = true; + logger.warn("Shouldn't happen : trying to get data from upstream for {} but original stream is null", + key); + } + } + // the cache file is now filled, get bytes from it. 
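+ // Serve the request from the cache file: at most 'currentSize' bytes can be returned, so the
+ // resulting array may be shorter than 'sizeToRead'; the InputStreamCacheWrapper copes with such
+ // partial reads by advancing its own offset only by the number of bytes actually returned.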
+ long maxToRead = Math.min(currentSize, sizeToRead); + ByteBuffer byteBufferFromChannelFile = ByteBuffer.allocate((int) maxToRead); + int byteReadNumber = fileChannelLocal.read(byteBufferFromChannelFile, Integer.valueOf(start).longValue()); + logger.trace("Read {} bytes from the filechannel", byteReadNumber); + if (byteReadNumber > 0) { + byte[] resultByteArray = new byte[byteReadNumber]; + byteBufferFromChannelFile.rewind(); + byteBufferFromChannelFile.get(resultByteArray); + return resultByteArray; + } else { + return new byte[0]; + } + } + + /** + * Return the number of bytes that we can actually read without calling + * the underlying stream + * + * @param offset + * @return + */ + protected int availableFrom(int offset) { + FileChannel fileChannelLocal = fileChannel; + if (fileChannelLocal == null) { + return 0; + } + try { + return Math.max(0, Long.valueOf(fileChannelLocal.size() - offset).intValue()); + } catch (IOException e) { + logger.debug("Cannot get file length for cache file {}", key); + return 0; + } + } + + protected void deleteFile() { + logger.debug("Receiving call to delete the cache file {}", key); + fileOperationLock.lock(); + try { + // check if a client is actually reading the file + if (countStreamClient <= 0) { + logger.debug("Effectively deleting the cached file {}", key); + // delete the file : + File fileLocal = file; + if (fileLocal != null) { + fileLocal.delete(); + } + // and the associated info + Storage storageLocal = storage; + if (storageLocal != null) { + storageLocal.remove(key); + } + } + } finally { + fileOperationLock.unlock(); + } + } + + public boolean isFaulty() { + return faulty; + } +} diff --git a/bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/InputStreamCacheWrapperTest.java b/bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/InputStreamCacheWrapperTest.java new file mode 100644 index 00000000000..19b46e2cfc5 --- /dev/null +++ b/bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/InputStreamCacheWrapperTest.java @@ -0,0 +1,78 @@ +/** + * Copyright (c) 2010-2023 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
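To relate the generic entry above back to the voice use case, the TTS cache can now be little more than key construction plus a call to the generic cache. The following sketch is inferred from the tests elsewhere in this patch and is not the actual TTSLRUCacheImpl code; the variable names, the cache size, the CachedTTSService method wiring and the AudioFormatInfo construction are assumptions:

    LRUMediaCache<AudioFormatInfo> mediaCache = new LRUMediaCache<>(storageService,
            100 * 1024 * 1024, TTSLRUCacheImpl.VOICE_TTS_CACHE_PID, getClass().getClassLoader());

    // key = TTS service simple class name + "_" + service-specific key, as in the tests
    String key = tts.getClass().getSimpleName() + "_" + tts.getCacheKey(text, voice, requestedFormat);

    LRUMediaCacheEntry<AudioFormatInfo> entry = mediaCache.get(key, () -> {
        try {
            AudioStream audio = tts.synthesizeForCache(text, voice, requestedFormat);
            return new LRUMediaCacheEntry<>(key, audio, new AudioFormatInfo(audio.getFormat()));
        } catch (TTSException e) {
            throw new DataRetrievalException("Cannot synthesize audio", e);
        }
    });

Prefixing the key with the service's simple class name, as the updated tests do, keeps entries from different services apart inside the shared cache folder.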
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.core.cache.lru; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.Mockito.*; + +import java.io.IOException; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; + +/** + * Test the wrapper stream in the cache system + * + * @author Gwendal Roulleau - Initial contribution + */ +@NonNullByDefault +@ExtendWith(MockitoExtension.class) +public class InputStreamCacheWrapperTest { + + private @Mock @NonNullByDefault({}) LRUMediaCacheEntry cacheEntry; + + /** + * Test the read() method + * + * @throws IOException + */ + @Test + public void cacheWrapperStreamTest() throws IOException { + when(cacheEntry.read(0, 1)).thenReturn(new byte[] { 1 }); + when(cacheEntry.read(1, 1)).thenReturn(new byte[] { 2 }); + when(cacheEntry.read(2, 1)).thenReturn(new byte[] { 3 }); + when(cacheEntry.read(3, 1)).thenReturn(new byte[0]); + + try (InputStreamCacheWrapper audioStreamCacheWrapper = new InputStreamCacheWrapper(cacheEntry)) { + assertEquals(1, audioStreamCacheWrapper.read()); + assertEquals(2, audioStreamCacheWrapper.read()); + assertEquals(3, audioStreamCacheWrapper.read()); + assertEquals(-1, audioStreamCacheWrapper.read()); + } + + verify(cacheEntry, times(4)).read(anyInt(), anyInt()); + verify(cacheEntry).closeStreamClient(); + verifyNoMoreInteractions(cacheEntry); + } + + /** + * Test the read by batch method + * + * @throws IOException + */ + @Test + public void cacheWrapperStreamReadBunchTest() throws IOException { + when(cacheEntry.read(anyInt(), anyInt())).thenReturn(new byte[] { 1 }, new byte[] { 2, 3 }, new byte[0]); + + try (InputStreamCacheWrapper audioStreamCacheWrapper = new InputStreamCacheWrapper(cacheEntry)) { + assertArrayEquals(new byte[] { 1, 2, 3 }, audioStreamCacheWrapper.readAllBytes()); + } + verify(cacheEntry, times(3)).read(anyInt(), anyInt()); + verify(cacheEntry).closeStreamClient(); + verifyNoMoreInteractions(cacheEntry); + } +} diff --git a/bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/LRUMediaCacheEntryTest.java b/bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/LRUMediaCacheEntryTest.java new file mode 100644 index 00000000000..b1cd712b165 --- /dev/null +++ b/bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/LRUMediaCacheEntryTest.java @@ -0,0 +1,301 @@ +/** + * Copyright (c) 2010-2023 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. 
+ * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.core.cache.lru; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.Mockito.*; + +import java.io.ByteArrayInputStream; +import java.io.IOException; +import java.io.InputStream; +import java.nio.charset.StandardCharsets; +import java.nio.file.Path; +import java.util.Arrays; +import java.util.List; +import java.util.Random; +import java.util.function.Supplier; +import java.util.stream.Collectors; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mock; +import org.mockito.junit.jupiter.MockitoExtension; +import org.openhab.core.OpenHAB; +import org.openhab.core.storage.Storage; +import org.openhab.core.storage.StorageService; + +/** + * Test the cache system + * + * @author Gwendal Roulleau - Initial contribution + */ +@ExtendWith(MockitoExtension.class) +@NonNullByDefault +public class LRUMediaCacheEntryTest { + + private @TempDir @NonNullByDefault({}) Path tempDir; + + private @Mock @NonNullByDefault({}) LRUMediaCache ttsServiceMock; + private @Mock @NonNullByDefault({}) Supplier> supplier; + + private @NonNullByDefault({}) @Mock StorageService storageService; + private @NonNullByDefault({}) @Mock Storage storage; + + @BeforeEach + public void init() { + System.setProperty(OpenHAB.USERDATA_DIR_PROG_ARGUMENT, tempDir.toString()); + storageService = new StorageService() { + @SuppressWarnings("unchecked") + @Override + public Storage getStorage(String name, @Nullable ClassLoader classLoader) { + return storage; + } + + @SuppressWarnings("unchecked") + @Override + public Storage getStorage(String name) { + return storage; + } + }; + } + + private LRUMediaCache createCache(long size) throws IOException { + LRUMediaCache voiceLRUCache = new LRUMediaCache(storageService, size, + "lrucachetest.pid", this.getClass().getClassLoader()); + return voiceLRUCache; + } + + public static class FakeStream extends InputStream { + + ByteArrayInputStream innerInputStream; + private boolean closed = false; + + public FakeStream(byte[] byteToReturn) { + innerInputStream = new ByteArrayInputStream(byteToReturn); + } + + @Override + public int read() throws IOException { + return innerInputStream.read(); + } + + @Override + public void close() throws IOException { + this.closed = true; + innerInputStream.close(); + } + + public boolean isClosed() { + return closed; + } + } + + /** + * Get twice the InputStream with only one call. 
+ * + * @throws IOException + */ + @Test + public void getInputStreamTwiceWithOnlyOneCallToTheSupplierAndCompareTest() throws IOException { + LRUMediaCache lruMediaCache = createCache(1000); + + InputStream fakeStream = new FakeStream("This is a false string to simulate some data".getBytes()); + MetadataSample metadata = new MetadataSample("meta1", 42); + when(supplier.get()).thenReturn(new LRUMediaCacheEntry<>("key1", fakeStream, metadata)); + + LRUMediaCacheEntry lruMediaCacheEntry = lruMediaCache.get("key1", supplier); + + // get InputStream wrapped by the cache system + InputStream actualAudioStream = lruMediaCacheEntry.getInputStream(); + String actuallyRead = new String(actualAudioStream.readAllBytes(), StandardCharsets.UTF_8); + actualAudioStream.close(); + // ensure that the data are not corrupted + assertEquals("This is a false string to simulate some data", actuallyRead); + + // get again the InputStream wrapped by the cache system + actualAudioStream = lruMediaCacheEntry.getInputStream(); + actuallyRead = new String(actualAudioStream.readAllBytes(), StandardCharsets.UTF_8); + actualAudioStream.close(); + // ensure that the data are not corrupted + assertEquals("This is a false string to simulate some data", actuallyRead); + + // Ensure the TTS service was called only once : + verify(supplier, times(1)).get(); + } + + /** + * Test that the service can handle several calls concurrently and get the result only once + * + * @throws IOException + */ + @Test + public void loadTwoStreamsAtTheSameTimeFromTheSameSupplierTest() throws IOException { + LRUMediaCache lruMediaCache = createCache(1000); + + // init simulated data stream + FakeStream fakeStream = new FakeStream(new byte[] { 1, 2, 3, 4, 5, 6, 7, 8 }); + MetadataSample metadata = new MetadataSample("meta1", 42); + when(supplier.get()).thenReturn(new LRUMediaCacheEntry<>("key1", fakeStream, metadata)); + + LRUMediaCacheEntry lruMediaCacheEntry = lruMediaCache.get("key1", supplier); + + // get a first InputStream wrapped by the cache system + InputStream actualAudioStream1 = lruMediaCacheEntry.getInputStream(); + + // get a second, concurrent, InputStream wrapped by the cache system + InputStream actualAudioStream2 = lruMediaCacheEntry.getInputStream(); + + // read bytes from the two stream concurrently + byte[] byteReadFromStream1 = actualAudioStream1.readNBytes(4); + byte[] byteReadFromStream2 = actualAudioStream2.readNBytes(4); + + assertArrayEquals(new byte[] { 1, 2, 3, 4 }, byteReadFromStream1); + assertArrayEquals(new byte[] { 1, 2, 3, 4 }, byteReadFromStream2); + + // second read, from the two stream concurrently + byteReadFromStream1 = actualAudioStream1.readNBytes(4); + byteReadFromStream2 = actualAudioStream2.readNBytes(4); + + actualAudioStream1.close(); + assertFalse(fakeStream.isClosed()); // not closed because there is still one open + actualAudioStream2.close(); + assertTrue(fakeStream.isClosed()); // all client closed, the main stream should also be closed + + assertArrayEquals(new byte[] { 5, 6, 7, 8 }, byteReadFromStream1); + assertArrayEquals(new byte[] { 5, 6, 7, 8 }, byteReadFromStream1); + + // we call the TTS service only once + verify(supplier, times(1)).get(); + verifyNoMoreInteractions(supplier); + } + + /** + * Test that the service can handle several calls concurrently in two threads and get the result only once + * + * @throws IOException + */ + @Test + public void loadTwoThreadsAtTheSameTimeFromTheSameSupplierTest() throws IOException { + LRUMediaCache lruMediaCache = createCache(1000); + + // init 
simulated data stream + byte[] randomData = getRandomData(10 * 10240); + FakeStream fakeStream = new FakeStream(randomData); + MetadataSample metadata = new MetadataSample("meta1", 42); + when(supplier.get()).thenReturn(new LRUMediaCacheEntry<>("key1", fakeStream, metadata)); + + LRUMediaCacheEntry lruMediaCacheEntry = lruMediaCache.get("key1", supplier); + + // get a first InputStream wrapped by the cache system + InputStream actualAudioStream1 = lruMediaCacheEntry.getInputStream(); + + // get a second, concurrent, InputStream wrapped by the cache system + InputStream actualAudioStream2 = lruMediaCacheEntry.getInputStream(); + + // read bytes from the two stream concurrently + List parallelAudioStreamList = Arrays.asList(actualAudioStream1, actualAudioStream2); + List bytesResultList = parallelAudioStreamList.parallelStream().map(stream -> readSafe(stream)) + .collect(Collectors.toList()); + + assertArrayEquals(randomData, bytesResultList.get(0)); + assertArrayEquals(randomData, bytesResultList.get(1)); + + actualAudioStream1.close(); + assertFalse(fakeStream.isClosed()); // not closed because there is still one open + actualAudioStream2.close(); + assertTrue(fakeStream.isClosed()); // all client closed, the main stream should also be closed + + // we call the TTS service only once + verify(supplier).get(); + verifyNoMoreInteractions(ttsServiceMock); + } + + private byte[] readSafe(InputStream InputStream) { + try { + return InputStream.readAllBytes(); + } catch (IOException e) { + return new byte[0]; + } + } + + private byte[] getRandomData(int length) { + Random random = new Random(); + byte[] randomBytes = new byte[length]; + random.nextBytes(randomBytes); + return randomBytes; + } + + @SuppressWarnings("null") + @Test + public void streamAndMetadataTest() throws IOException { + LRUMediaCache lruMediaCache = createCache(1000); + + // init simulated data stream + byte[] randomData = getRandomData(2 * 10240); + + FakeStream fakeStream = new FakeStream(randomData); + MetadataSample metadata = new MetadataSample("meta1", 42); + when(supplier.get()).thenReturn(new LRUMediaCacheEntry<>("key1", fakeStream, metadata)); + when(storage.get("key1")).thenReturn(metadata); + + LRUMediaCacheEntry lruMediaCacheEntry = lruMediaCache.get("key1", supplier); + + InputStream audioStreamClient = lruMediaCacheEntry.getInputStream(); + byte[] bytesRead = audioStreamClient.readAllBytes(); + audioStreamClient.close(); + assertTrue(fakeStream.isClosed()); + + assertEquals(metadata.meta1, lruMediaCacheEntry.getMetadata().meta1); + assertEquals(metadata.meta2, lruMediaCacheEntry.getMetadata().meta2); + assertArrayEquals(randomData, bytesRead); + } + + /** + * Get the total length of the stream by forcing it to read everything + * + * @throws TTSException + * @throws IOException + */ + @Test + public void getTotalSizeTest() throws IOException { + + LRUMediaCache lruMediaCache = createCache(1000); + + // init simulated data stream + byte[] randomData = getRandomData(10 * 10240); + FakeStream fakeStream = new FakeStream(randomData); + MetadataSample metadata = new MetadataSample("meta1", 42); + when(supplier.get()).thenReturn(new LRUMediaCacheEntry<>("key1", fakeStream, metadata)); + + LRUMediaCacheEntry lruMediaCacheEntry = lruMediaCache.get("key1", supplier); + + InputStream audioStreamClient = lruMediaCacheEntry.getInputStream(); + Long totalSize = lruMediaCacheEntry.getTotalSize(); + audioStreamClient.close(); + assertEquals(10 * 10240, totalSize); + } + + private static class MetadataSample { + protected String 
meta1; + protected int meta2; + + public MetadataSample(String meta1, int meta2) { + this.meta1 = meta1; + this.meta2 = meta2; + } + } +} diff --git a/bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/LRUMediaCacheTest.java b/bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/LRUMediaCacheTest.java new file mode 100644 index 00000000000..20690ec38a8 --- /dev/null +++ b/bundles/org.openhab.core/src/test/java/org/openhab/core/cache/lru/LRUMediaCacheTest.java @@ -0,0 +1,461 @@ +/** + * Copyright (c) 2010-2023 Contributors to the openHAB project + * + * See the NOTICE file(s) distributed with this work for additional + * information. + * + * This program and the accompanying materials are made available under the + * terms of the Eclipse Public License 2.0 which is available at + * http://www.eclipse.org/legal/epl-2.0 + * + * SPDX-License-Identifier: EPL-2.0 + */ +package org.openhab.core.cache.lru; + +import static org.junit.jupiter.api.Assertions.*; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.*; + +import java.io.File; +import java.io.FileWriter; +import java.io.IOException; +import java.io.InputStream; +import java.nio.file.Files; +import java.nio.file.Path; +import java.util.AbstractMap; +import java.util.function.Supplier; +import java.util.stream.Stream; + +import org.eclipse.jdt.annotation.NonNullByDefault; +import org.eclipse.jdt.annotation.Nullable; +import org.junit.jupiter.api.BeforeEach; +import org.junit.jupiter.api.Test; +import org.junit.jupiter.api.extension.ExtendWith; +import org.junit.jupiter.api.io.TempDir; +import org.mockito.Mock; +import org.mockito.Mockito; +import org.mockito.junit.jupiter.MockitoExtension; +import org.openhab.core.OpenHAB; +import org.openhab.core.storage.Storage; +import org.openhab.core.storage.StorageService; + +/** + * Test the cache system + * + * @author Gwendal Roulleau - Initial contribution + */ +@ExtendWith(MockitoExtension.class) +@NonNullByDefault +public class LRUMediaCacheTest { + + private @TempDir @NonNullByDefault({}) Path tempDir; + + private @Mock @NonNullByDefault({}) InputStream inputStreamMock; + private @Mock @NonNullByDefault({}) Supplier> supplier; + + private @NonNullByDefault({}) @Mock StorageService storageService; + private @NonNullByDefault({}) @Mock Storage storage; + + @BeforeEach + public void init() { + storageService = new StorageService() { + @SuppressWarnings("unchecked") + @Override + public Storage getStorage(String name, @Nullable ClassLoader classLoader) { + return storage; + } + + @SuppressWarnings("unchecked") + @Override + public Storage getStorage(String name) { + return storage; + } + }; + System.setProperty(OpenHAB.USERDATA_DIR_PROG_ARGUMENT, tempDir.toString()); + } + + private LRUMediaCache createCache(long size) throws IOException { + LRUMediaCache voiceLRUCache = new LRUMediaCache(storageService, size, + "lrucachetest.pid", this.getClass().getClassLoader()); + return voiceLRUCache; + } + + /** + * Basic get and set for the LRU cache + * + * @throws IOException + */ + @Test + public void simpleLRUPutAndGetTest() throws IOException { + LRUMediaCache lruCache = createCache(10); + LRUMediaCacheEntry cacheEntry = new LRUMediaCacheEntry<>("key1"); + lruCache.put(cacheEntry); + assertEquals(cacheEntry, lruCache.cachedResults.get("key1")); + assertEquals(null, lruCache.cachedResults.get("key2")); + } + + /** + * Test the LRU eviction policy + * + * @throws IOException + */ + @Test + public void putAndGetAndEvictionOrderLRUTest() throws 
IOException { + LRUMediaCache lruCache = createCache(10240); + LRUMediaCacheEntry cacheEntry1 = new LRUMediaCacheEntry<>("key1"); + cacheEntry1.setCacheContext(tempDir, storage); + cacheEntry1.currentSize = 4 * 1024; + lruCache.cachedResults.put(cacheEntry1.getKey(), cacheEntry1); + LRUMediaCacheEntry cacheEntry2 = new LRUMediaCacheEntry<>("key2"); + cacheEntry2.setCacheContext(tempDir, storage); + cacheEntry2.currentSize = 4 * 1024; + lruCache.cachedResults.put(cacheEntry2.getKey(), cacheEntry2); + LRUMediaCacheEntry cacheEntry3 = new LRUMediaCacheEntry<>("key3"); + cacheEntry3.setCacheContext(tempDir, storage); + cacheEntry3.currentSize = 2 * 1024; + lruCache.cachedResults.put(cacheEntry3.getKey(), cacheEntry3); + LRUMediaCacheEntry cacheEntry4 = new LRUMediaCacheEntry<>("key4"); + cacheEntry4.setCacheContext(tempDir, storage); + cacheEntry4.currentSize = 4 * 1024; + lruCache.cachedResults.put(cacheEntry4.getKey(), cacheEntry4); + + lruCache.makeSpace(); + // cacheEntry1 should be evicted now (size limit is 10, and effective size is 12 when we try to put the + // cacheEntry4) + assertEquals(null, lruCache.cachedResults.get("key1")); + + // getting cacheEntry2 will put it in head, cacheEntry3 is now tail + assertEquals(cacheEntry2, lruCache.cachedResults.get("key2")); + + // putting again cacheEntry1 should expel tail, which is cacheEntry3 + lruCache.cachedResults.put(cacheEntry1.getKey(), cacheEntry1); + lruCache.makeSpace(); + assertEquals(null, lruCache.cachedResults.get("key3")); + } + + /** + * Test the file deletion + * + * @throws IOException + */ + @Test + public void fileDeletionTest() throws IOException { + LRUMediaCacheEntry cacheEntry1 = new LRUMediaCacheEntry<>("key1"); + cacheEntry1.setCacheContext(tempDir, storage); + + File result1File = tempDir.resolve(cacheEntry1.getKey()).toFile(); + result1File.createNewFile(); + MetadataSample metadataInfo = new MetadataSample("text", 3); + storage.put("key1", metadataInfo); + + cacheEntry1.deleteFile(); + + assertFalse(result1File.exists()); + assertNull(storage.get("key1")); + } + + /** + * Test that the eviction policy calls the delete method + * + * @throws IOException + */ + @Test + public void fileDeletionWhenEvictionTest() throws IOException { + LRUMediaCache voiceLRUCache = createCache(10240); + + @SuppressWarnings("unchecked") + LRUMediaCacheEntry entryToEvict = Mockito.mock(LRUMediaCacheEntry.class); + when(entryToEvict.getCurrentSize()).thenReturn(15L * 1024); + when(entryToEvict.getKey()).thenReturn("resultToEvict"); + voiceLRUCache.put(entryToEvict); + + // the cache is already full, so the next put will delete resultToEvict + LRUMediaCacheEntry cacheEntry2 = new LRUMediaCacheEntry<>("key2"); + cacheEntry2.currentSize = 4 * 1024; + voiceLRUCache.put(cacheEntry2); + + verify(entryToEvict).deleteFile(); + } + + /** + * This test checks than we can overwrite a previously + * cached entry and then it is now at the head position. 
+     *
+     * @throws IOException
+     */
+    @Test
+    public void putExistingResultLRUTest() throws IOException {
+        LRUMediaCache<MetadataSample> lruCache = createCache(10240);
+        LRUMediaCacheEntry<MetadataSample> cacheEntry = new LRUMediaCacheEntry<>("key1");
+        cacheEntry.currentSize = 4 * 1024;
+        lruCache.cachedResults.put(cacheEntry.getKey(), cacheEntry);
+        LRUMediaCacheEntry<MetadataSample> cacheEntry2 = new LRUMediaCacheEntry<>("key2");
+        cacheEntry2.currentSize = 10 * 1024;
+        lruCache.cachedResults.put(cacheEntry2.getKey(), cacheEntry2);
+
+        // put key1 again --> key2 is now the least recently used entry
+        lruCache.cachedResults.put(cacheEntry.getKey(), cacheEntry);
+
+        LRUMediaCacheEntry<MetadataSample> cacheEntry3 = new LRUMediaCacheEntry<>("key3");
+        cacheEntry3.currentSize = 4 * 1024;
+        lruCache.cachedResults.put(cacheEntry3.getKey(), cacheEntry3);
+        lruCache.makeSpace();
+
+        // key2 should be evicted now
+        assertEquals(null, lruCache.cachedResults.get("key2"));
+
+        // key1 and key3 are hits
+        assertEquals(cacheEntry, lruCache.cachedResults.get("key1"));
+        assertEquals(cacheEntry3, lruCache.cachedResults.get("key3"));
+    }
+
+    /**
+     * Simulate a cache miss, then two subsequent hits.
+     * The supplier service is called only once.
+     *
+     * @throws IOException
+     */
+    @Test
+    public void getCacheMissAndHitTest() throws IOException {
+        MetadataSample metadata = new MetadataSample("meta1", 42);
+
+        when(supplier.get()).thenReturn(new LRUMediaCacheEntry<>("key", inputStreamMock, metadata));
+        // In this test the stream will return two bytes of data, then an empty array to signal its end:
+        when(inputStreamMock.readNBytes(any(Integer.class))).thenReturn(new byte[2], new byte[0]);
+
+        LRUMediaCache<MetadataSample> lruCache = createCache(1000);
+
+        // first a cache miss
+        LRUMediaCacheEntry<MetadataSample> resultStream = lruCache.get("key", supplier);
+        resultStream.getInputStream().readAllBytes();
+
+        // then a cache hit
+        resultStream = lruCache.get("key", supplier);
+        resultStream.getInputStream().readAllBytes();
+
+        // then another cache hit
+        resultStream = lruCache.get("key", supplier);
+        resultStream.getInputStream().readAllBytes();
+
+        // even with three calls to get, the supplier service and the underlying stream were called
+        // only once:
+        verify(supplier, times(1)).get();
+        // this is called twice because the second call returns zero bytes and signals the end of the stream
+        verify(inputStreamMock, times(2)).readNBytes(any(Integer.class));
+    }
+
+    /**
+     * Load some cache entries from files on disk
+     *
+     * @throws IOException
+     */
+    @SuppressWarnings({ "rawtypes", "unchecked", "null" })
+    @Test
+    public void loadResultsFromCacheDirectoryTest() throws IOException {
+        // prepare cache directory
+        Path cacheDirectory = tempDir.resolve("cache/lrucachetest.pid/");
+        Files.createDirectories(cacheDirectory);
+
+        // prepare some files
+        File file1 = cacheDirectory.resolve("key1").toFile();
+        MetadataSample metadataSample1 = new MetadataSample("text1", 1);
+        when(storage.get("key1")).thenReturn(metadataSample1);
+        try (FileWriter file1Writer = new FileWriter(file1)) {
+            file1Writer.write("falsedata");
+        }
+
+        // prepare some more files
+        File file2 = cacheDirectory.resolve("key2").toFile();
+        MetadataSample metadataSample2 = new MetadataSample("text2", 2);
+        when(storage.get("key2")).thenReturn(metadataSample2);
+        try (FileWriter file2Writer = new FileWriter(file2)) {
+            file2Writer.write("falsedata");
+        }
+        when(storage.stream())
+                .thenAnswer((invocation) -> Stream.of(new AbstractMap.SimpleImmutableEntry("key1", metadataSample1),
+                        new AbstractMap.SimpleImmutableEntry("key2", metadataSample2)));
+
+        // create an LRU cache that will use the above data
+        LRUMediaCache<MetadataSample> lruCache = createCache(20);
+
+        LRUMediaCacheEntry<MetadataSample> result1 = lruCache.cachedResults.get("key1");
+        assertNotNull(result1);
+        assertEquals(result1.getMetadata().getMeta2(), 1);
+        assertEquals(result1.getMetadata().getMeta1(), "text1");
+
+        LRUMediaCacheEntry<MetadataSample> result2 = lruCache.cachedResults.get("key2");
+        assertNotNull(result2);
+        assertEquals(result2.getMetadata().getMeta1(), "text2");
+        assertEquals(result2.getMetadata().getMeta2(), 2);
+
+        LRUMediaCacheEntry<MetadataSample> result3 = lruCache.cachedResults.get("key3");
+        assertNull(result3);
+    }
+
+    @Test
+    public void enoughFreeDiskSpaceTest() throws IOException {
+        // arbitrarily large value; the cache should be flagged as unusable because no disk of this size exists
+        LRUMediaCache<MetadataSample> cache = createCache(Long.MAX_VALUE / 10);
+        assertFalse(cache.cacheIsOK);
+    }
+
+    /**
+     * Test the deletion of orphaned elements (either data or metadata)
+     */
+    @SuppressWarnings({ "rawtypes", "unchecked" })
+    @Test
+    public void cleanDirectoryOrphanFilesTest() throws IOException {
+        // prepare cache directory
+        Path cacheDirectory = tempDir.resolve("cache/lrucachetest.pid/");
+        Files.createDirectories(cacheDirectory);
+
+        // prepare some files: normal entry
+        File file1 = cacheDirectory.resolve("key1").toFile();
+        MetadataSample metadataSample1 = new MetadataSample("text1", 1);
+        when(storage.get("key1")).thenReturn(metadataSample1);
+        try (FileWriter file1Writer = new FileWriter(file1)) {
+            file1Writer.write("falsedata");
+        }
+
+        // prepare some files: orphan metadata without a data file
+        MetadataSample metadataSample2 = new MetadataSample("text2", 2);
+        when(storage.get("key2")).thenReturn(metadataSample2);
+
+        // prepare storage map for the stream operation
+        when(storage.stream())
+                .thenAnswer((invocation) -> Stream.of(new AbstractMap.SimpleImmutableEntry("key1", metadataSample1),
+                        new AbstractMap.SimpleImmutableEntry("key2", metadataSample2)));
+
+        // prepare some files: orphan file without metadata
+        File file3 = cacheDirectory.resolve("key3").toFile();
+        when(storage.get("key3")).thenReturn(null);
+        try (FileWriter file3Writer = new FileWriter(file3)) {
+            file3Writer.write("fake non empty data");
+        }
+
+        // create an LRU cache that will use the above data
+        createCache(20);
+
+        // the file for entry 1 still exists:
+        assertTrue(file1.exists());
+        assertNotNull(storage.get("key1"));
+
+        // the orphaned metadata for entry 2 should have been removed:
+        verify(storage).remove("key2");
+
+        // the orphaned file for entry 3 should have been deleted:
+        assertFalse(file3.exists());
+    }
+
+    /**
+     * Test the deletion of metadata when the file is empty
+     */
+    @Test
+    public void cleanDirectoryEmptyFilesTest() throws IOException {
+        // prepare cache directory
+        Path cacheDirectory = tempDir.resolve("cache/lrucachetest.pid/");
+        Files.createDirectories(cacheDirectory);
+
+        // prepare some files: normal entry
+        File file1 = cacheDirectory.resolve("key1").toFile();
+        when(storage.get("key1")).thenReturn(new MetadataSample("key1", 2));
+        try (FileWriter file1Writer = new FileWriter(file1)) {
+            file1Writer.write("falsedata");
+        }
+
+        // prepare some files: empty file
+        File file2 = cacheDirectory.resolve("key2").toFile();
+        file2.createNewFile();
+        assertTrue(file2.exists());
+
+        // create an LRU cache that will load the above data
+        createCache(20);
+
+        // the file for entry 1 still exists:
+        assertTrue(file1.exists());
+        assertNotNull(storage.get("key1"));
+
+        // the entry 2 file was empty, so the file and its metadata should have been deleted:
+        verify(storage).remove("key2");
+        assertFalse(file2.exists());
+    }
+
+    /**
+     * Test a cache entry whose underlying file has been deleted.
+     * The data should be recomputed on a subsequent get.
+     */
+    @Test
+    public void faultyStreamTest() throws IOException {
+        MetadataSample metadata = new MetadataSample("meta1", 42);
+
+        when(supplier.get()).thenAnswer((invocation) -> new LRUMediaCacheEntry<>("key", inputStreamMock, metadata));
+        // In this test the stream will return two bytes of data, then an empty array to signal its end.
+        // It will be consumed twice, so return the sequence twice
+        when(inputStreamMock.readNBytes(any(Integer.class))).thenReturn(new byte[2], new byte[0], new byte[2],
+                new byte[0]);
+
+        LRUMediaCache<MetadataSample> lruCache = createCache(1000);
+
+        // first a cache miss
+        LRUMediaCacheEntry<MetadataSample> resultEntry = lruCache.get("key", supplier);
+        InputStream resultInputStream = resultEntry.getInputStream();
+        resultInputStream.readAllBytes();
+        resultInputStream.close();
+
+        File dataFile = tempDir.resolve("cache/lrucachetest.pid").resolve("key").toFile();
+        assertTrue(dataFile.exists());
+        dataFile.delete();
+        assertFalse(dataFile.exists());
+
+        // then a cache hit
+        resultEntry = lruCache.get("key", supplier);
+        resultInputStream = resultEntry.getInputStream();
+
+        // but the result is faulty because the file is missing
+        assertTrue(resultEntry.isFaulty());
+
+        // trying to read it throws an exception
+        boolean exceptionCaught = false;
+        try {
+            resultInputStream.readAllBytes();
+        } catch (IOException io) {
+            if ("Cannot read cache from null file channel or deleted file.".equals(io.getMessage())) {
+                exceptionCaught = true;
+            }
+        }
+        assertTrue(exceptionCaught);
+        resultInputStream.close();
+
+        // get it another time
+        resultEntry = lruCache.get("key", supplier);
+        // this time the result is no longer faulty because it was computed again
+        assertFalse(resultEntry.isFaulty());
+
+        resultInputStream = resultEntry.getInputStream();
+        byte[] bytesRead = resultInputStream.readAllBytes();
+        assertEquals(2, bytesRead.length);
+        resultInputStream.close();
+
+        // the supplier service and the underlying stream were called twice because of the missing file:
+        verify(supplier, times(2)).get();
+        verify(inputStreamMock, times(4)).readNBytes(any(Integer.class));
+    }
+
+    private static class MetadataSample {
+
+        protected String meta1;
+        protected int meta2;
+
+        public MetadataSample(String meta1, int meta2) {
+            this.meta1 = meta1;
+            this.meta2 = meta2;
+        }
+
+        public String getMeta1() {
+            return meta1;
+        }
+
+        public int getMeta2() {
+            return meta2;
+        }
+    }
+}
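For reference, a minimal usage sketch of the new generic cache, assuming only the LRUMediaCache and LRUMediaCacheEntry signatures exercised by the tests above; ExampleMetadata, "example.pid" and fetchData() are illustrative placeholders, not APIs introduced by this change.

import java.io.IOException;
import java.io.InputStream;

import org.openhab.core.cache.lru.LRUMediaCache;
import org.openhab.core.cache.lru.LRUMediaCacheEntry;
import org.openhab.core.storage.StorageService;

class LRUMediaCacheUsageSketch {

    static class ExampleMetadata {
        // any bean describing the cached data would do here
        String label = "example";
    }

    InputStream getCached(StorageService storageService, String key) throws IOException {
        // one cache per consumer: data files go under the cache/example.pid directory,
        // metadata is indexed in the storage obtained from the StorageService
        LRUMediaCache<ExampleMetadata> cache = new LRUMediaCache<>(storageService, 10 * 1024 * 1024, "example.pid",
                LRUMediaCacheUsageSketch.class.getClassLoader());

        // the supplier is invoked only on a cache miss; hits replay the data already on disk
        LRUMediaCacheEntry<ExampleMetadata> entry = cache.get(key,
                () -> new LRUMediaCacheEntry<>(key, fetchData(key), new ExampleMetadata()));
        return entry.getInputStream();
    }

    private InputStream fetchData(String key) {
        // placeholder for the expensive retrieval (network call, TTS synthesis, ...)
        return InputStream.nullInputStream();
    }
}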