# feat: add support for AsyncWrite #16

Closed · wants to merge 5 commits
**Cargo.toml** — 4 additions & 1 deletion

```diff
@@ -20,4 +20,7 @@ members = [
     "hdfs",
     "hdfs-examples",
     "hdfs-testing",
-]
+]
+
+[patch.crates-io]
+object_store = { git = "https://github.com/apache/arrow-rs.git", rev = "bff6155d38e19bfe62a776731b78b435560f2c8e" }
```
**hdfs/Cargo.toml** — 1 addition & 1 deletion

```diff
@@ -46,5 +46,5 @@ chrono = { version = "0.4" }
 fs-hdfs = { version = "^0.1.11", optional = true }
 fs-hdfs3 = { version = "^0.1.11", optional = true }
 futures = "0.3"
-object_store = "0.6.1"
+object_store = { version = "0.6.1", features = ["cloud"] }
 tokio = { version = "1.18", features = ["macros", "rt", "rt-multi-thread", "sync", "parking_lot"] }
```
**hdfs/src/object_store/hdfs.rs** — 86 additions & 5 deletions

```diff
@@ -17,11 +17,11 @@

 //! Object store that represents the HDFS File System.

-use std::collections::{BTreeSet, VecDeque};
+use std::collections::{BTreeSet, HashMap, VecDeque};
 use std::fmt::{Display, Formatter};
 use std::ops::Range;
 use std::path::PathBuf;
-use std::sync::Arc;
+use std::sync::{Arc, Mutex};

 use async_trait::async_trait;
 use bytes::Bytes;
@@ -33,6 +33,7 @@ use object_store::{
     path::{self, Path},
     Error, GetOptions, GetResult, ListResult, MultipartId, ObjectMeta, ObjectStore, Result,
 };
+use object_store::multipart::{CloudMultiPartUpload, CloudMultiPartUploadImpl};
 use tokio::io::AsyncWrite;

 /// scheme for HDFS File System
@@ -111,6 +112,58 @@ impl Display for HadoopFileSystem {
     }
 }

+struct HdfsMultiPartUpload {
+    location: Path,
+    hdfs: Arc<HdfsFs>,
+    content: Arc<Mutex<HashMap<usize, Vec<u8>>>>,
+}
```

> **Collaborator** (on the `content` field): This may lead to too much memory pressure, since it has to keep all of the contents in memory before sending to the HDFS.
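One way to address that concern (an untested sketch, not part of this PR) would be to open the HDFS file when the upload starts and flush parts as soon as they become contiguous, so only out-of-order parts stay in memory. It reuses the same fs-hdfs calls that `complete` uses (`create_with_overwrite`, `write`); `HdfsFile` is assumed here to be the handle type those calls return:

```rust
use std::collections::HashMap;

// Sketch of in-order flushing state for one multipart upload. Contiguous
// parts go straight to HDFS; only parts that arrive out of order are kept
// in memory until the gap before them is filled.
struct StreamState {
    file: HdfsFile,                   // assumed handle from create_with_overwrite
    next_idx: usize,                  // next part index expected on disk
    pending: HashMap<usize, Vec<u8>>, // out-of-order parts held temporarily
}

impl StreamState {
    fn accept(&mut self, idx: usize, buf: Vec<u8>) -> Result<(), HdfsErr> {
        self.pending.insert(idx, buf);
        // Drain every part that is now contiguous with what was written.
        while let Some(part) = self.pending.remove(&self.next_idx) {
            self.file.write(part.as_slice())?;
            self.next_idx += 1;
        }
        Ok(())
    }
}
```

Wiring this into `put_multipart_part` would still need the shared `Mutex` and the blocking-spawn treatment the PR already applies in `complete`.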

```diff
+#[async_trait]
+impl CloudMultiPartUploadImpl for HdfsMultiPartUpload {
+    async fn put_multipart_part(
+        &self,
+        buf: Vec<u8>,
+        part_idx: usize,
+    ) -> Result<object_store::multipart::UploadPart, std::io::Error> {
+        // Parts are staged in memory; nothing reaches HDFS until `complete`.
+        let mut content = self.content.lock().unwrap();
+        content.insert(part_idx, buf);
+
+        Ok(object_store::multipart::UploadPart {
+            content_id: part_idx.to_string(),
+        })
+    }
+
+    async fn complete(
+        &self,
+        _completed_parts: Vec<object_store::multipart::UploadPart>,
+    ) -> Result<(), std::io::Error> {
+        let hdfs = self.hdfs.clone();
+        let location = HadoopFileSystem::path_to_filesystem(&self.location.clone());
+        let content = self.content.clone();
+
+        maybe_spawn_blocking(move || {
+            let file = match hdfs.create_with_overwrite(&location, true) {
+                Ok(f) => f,
+                Err(e) => return Err(to_error(e)),
+            };
+
+            // Write the staged parts to the file in part-index order.
+            let content = content.lock().unwrap();
+            let mut keys: Vec<usize> = content.keys().cloned().collect();
+            keys.sort();
+
+            assert_eq!(keys[0], 0, "Missing part 0 for multipart upload");
+            assert_eq!(
+                keys[keys.len() - 1],
+                keys.len() - 1,
+                "Missing last part for multipart upload"
+            );
+
+            for key in keys {
+                let buf = content.get(&key).unwrap();
+                file.write(buf.as_slice()).map_err(to_error)?;
+            }
+
+            file.close().map_err(to_error)?;
+
+            Ok(())
+        })
+        .await
+        .map_err(to_io_error)
+    }
+}

 #[async_trait]
 impl ObjectStore for HadoopFileSystem {
     // Current implementation is very simple due to missing configs,
```
```diff
@@ -138,13 +191,20 @@ impl ObjectStore for HadoopFileSystem {

     async fn put_multipart(
         &self,
-        _location: &Path,
+        location: &Path,
     ) -> Result<(MultipartId, Box<dyn AsyncWrite + Unpin + Send>)> {
-        todo!()
+        let upload = HdfsMultiPartUpload {
+            location: location.clone(),
+            hdfs: self.hdfs.clone(),
+            content: Arc::new(Mutex::new(HashMap::new())),
+        };
+
+        Ok((MultipartId::default(), Box::new(CloudMultiPartUpload::new(upload, 8))))
     }

     async fn abort_multipart(&self, _location: &Path, _multipart_id: &MultipartId) -> Result<()> {
-        todo!()
+        // Currently, the implementation doesn't put anything to HDFS until complete is called.
+        Ok(())
     }

     async fn get(&self, location: &Path) -> Result<GetResult> {
```
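For orientation, this is roughly how a caller would exercise the new multipart path through the returned `AsyncWrite` handle (a sketch against the object_store 0.6 API; the function, path, and payload are illustrative, not from this PR):

```rust
use object_store::{path::Path, ObjectStore};
use tokio::io::AsyncWriteExt;

// Hypothetical driver; `store` would be a HadoopFileSystem from this crate.
async fn write_example(store: &dyn ObjectStore) -> Result<(), Box<dyn std::error::Error>> {
    let path = Path::from("user/test/output.bin"); // illustrative path
    let (_id, mut writer) = store.put_multipart(&path).await?;

    // Each buffered chunk is handed to put_multipart_part by the
    // CloudMultiPartUpload driver (up to 8 parts in flight, per the
    // constructor argument above).
    writer.write_all(&vec![0u8; 16 * 1024 * 1024]).await?;

    // shutdown() drives complete(), which is when the HDFS file is
    // actually created and written.
    writer.shutdown().await?;
    Ok(())
}
```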
```diff
@@ -620,6 +680,27 @@ fn to_error(err: HdfsErr) -> Error {
     }
 }

+fn to_io_error(err: Error) -> std::io::Error {
+    match err {
+        Error::Generic { store, source } => {
+            std::io::Error::new(std::io::ErrorKind::Other, format!("{}: {}", store, source))
+        }
+        Error::NotFound { path, source } => {
+            std::io::Error::new(std::io::ErrorKind::NotFound, format!("{}: {}", path, source))
+        }
+        Error::AlreadyExists { path, source } => {
+            std::io::Error::new(std::io::ErrorKind::AlreadyExists, format!("{}: {}", path, source))
+        }
+        Error::InvalidPath { source } => {
+            std::io::Error::new(std::io::ErrorKind::InvalidInput, source)
+        }
+        _ => {
+            std::io::Error::new(std::io::ErrorKind::Other, format!("HadoopFileSystem: {}", err))
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
```