@@ -17,11 +17,17 @@

package org.apache.hadoop.ozone.container.keyvalue.impl;

import static org.apache.hadoop.ozone.container.keyvalue.helpers.ChunkUtils.limitReadSize;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.MappedByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;
import java.util.Objects;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import org.apache.hadoop.hdds.client.BlockID;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
import org.apache.hadoop.ozone.common.ChunkBuffer;
import org.apache.hadoop.ozone.container.common.helpers.BlockData;
@@ -40,6 +46,49 @@
*/
public class ChunkManagerDummyImpl implements ChunkManager {

private static final int DEFAULT_MAP_SIZE = 1 * 1024 * 1024; // 1MB

private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
private volatile MappedByteBuffer mapped;
private volatile int mappedSize;
private volatile Path backingFile;

private void ensureMapped(int minSize)
throws StorageContainerException {
if (mapped != null && mappedSize >= minSize) {
return;
}

lock.writeLock().lock();
try {
if (mapped != null && mappedSize >= minSize) {
return;
}

int newSize = Math.max(DEFAULT_MAP_SIZE, minSize);
if (backingFile == null) {
backingFile = Files.createTempFile("ozone-dummy-chunk-", ".bin");
backingFile.toFile().deleteOnExit();
}

try (FileChannel ch = FileChannel.open(backingFile,
StandardOpenOption.READ, StandardOpenOption.WRITE)) {
if (ch.size() < newSize) {
ch.truncate(newSize);
}
Comment on lines +76 to +78

Here, the code uses ch.truncate(newSize) to expand the file when ch.size() < newSize, but truncate() can only shrink a file; it cannot expand one.

According to the Java API: "If the given size is greater than or equal to the file's current size then the file is not modified."
When the file is smaller than newSize, truncate() does nothing, leaving the file unchanged. This causes silent failures when read requests exceed the current mapped buffer size.
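
A quick, self-contained check (a sketch only; the class name, temp file and sizes are arbitrary) confirms that truncate() never grows a file:

import java.nio.ByteBuffer;
import java.nio.channels.FileChannel;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardOpenOption;

public class TruncateDemo {
  public static void main(String[] args) throws Exception {
    Path tmp = Files.createTempFile("truncate-demo-", ".bin");
    try (FileChannel ch = FileChannel.open(tmp,
        StandardOpenOption.READ, StandardOpenOption.WRITE)) {
      ch.write(ByteBuffer.wrap(new byte[500 * 1024])); // file is now 500 KB
      ch.truncate(1024 * 1024); // no-op: requested size >= current size
      System.out.println(ch.size()); // prints 512000, not 1048576
    } finally {
      Files.deleteIfExists(tmp);
    }
  }
}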

1. Scenario 1: the file is smaller than newSize (read request < 1 MB)

Example:

  • Current file: 500 KB
  • Request: Read 256 KB
  • newSize = max(1MB, 256KB) = 1MB (because of DEFAULT_MAP_SIZE)
Current: ch.size() = 500 KB
Need:    newSize = 1 MB
Check:   Is 500 KB < 1 MB? → YES 
Action:  ch.truncate(1 MB)
Result:  ❌ truncate() does NOTHING (can't expand)
         File stays 500 KB, but we need 1 MB!

Problem: the file never grows to 1 MB, so mapping 1 MB from it fails or maps the wrong size.

2. Scenario 2: the file is larger than newSize (read request < 1 MB)

Example:

  • Current file: 2 MB
  • Request: Read 256 KB
  • newSize = max(1MB, 256KB) = 1MB
Current: ch.size() = 2 MB
Need:    newSize = 1 MB
Check:   Is 2 MB < 1 MB? → NO ❌
Action:  Skip truncate (condition false)
Result:  File is already big enough (2 MB > 1 MB)
         But we're mapping only 1 MB, so it works!

Status: Works (but wastes space - file is 2 MB but we only use 1 MB)

We need to handle both scenarios explicitly:

if (ch.size() < newSize) {
  // LOGIC:  Need to EXPAND file 
} else if (ch.size() > newSize) {
  // Need to SHRINK file
  ch.truncate(newSize);  // This works for shrinking!
}
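
For the expand branch, one possible approach (a sketch only, using the same ch and newSize as above; RandomAccessFile.setLength(newSize) would also work) is to write a single byte at the last offset, since a positional FileChannel.write() can extend the file past its current end:

if (ch.size() < newSize) {
  // Writing one byte at newSize - 1 grows the file to exactly newSize;
  // unlike truncate(), a positional write can extend past the current EOF.
  ch.write(ByteBuffer.wrap(new byte[1]), newSize - 1);
} else if (ch.size() > newSize) {
  ch.truncate(newSize); // truncate() does work for shrinking
}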

mapped = ch.map(FileChannel.MapMode.READ_ONLY, 0, newSize);
mappedSize = newSize;
}
} catch (IOException e) {
throw new StorageContainerException(
"Failed to create mapped buffer for dummy chunk reads",
e,
ContainerProtos.Result.IO_EXCEPTION);
} finally {
lock.writeLock().unlock();
}
}

@Override
public void writeChunk(Container container, BlockID blockID, ChunkInfo info,
ChunkBuffer data, DispatcherContext dispatcherContext)
@@ -72,9 +121,26 @@ public ChunkBuffer readChunk(Container container, BlockID blockID,
ChunkInfo info, DispatcherContext dispatcherContext)
throws StorageContainerException {

limitReadSize(info.getLen());
// stats are handled in ChunkManagerImpl
return ChunkBuffer.wrap(ByteBuffer.allocate((int) info.getLen()));
long lenL = info.getLen();
if (lenL > Integer.MAX_VALUE) {
throw new StorageContainerException(
"Chunk length too large: " + lenL, null);
}
int len = (int) lenL;

ensureMapped(len);

lock.readLock().lock();
try {
ByteBuffer dup = mapped.duplicate();
dup.position(0);
dup.limit(len);
ByteBuffer slice = dup.slice();

return ChunkBuffer.wrap(slice);
} finally {
lock.readLock().unlock();
}
}

@Override