[log] add more error info for hdfs reader writer (#10475)
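The core of the change: error strings for HDFS operations are now built from hdfsGetLastError() instead of strerror(errno), since errno is often stale or unrelated for libhdfs calls while the client library keeps its own last-error text. Below is a minimal sketch of that pattern, assuming the libhdfs3-style const char* hdfsGetLastError() API the diff relies on; the helper name is made up for illustration, and the real code also prefixes the BE host via BackendOptions::get_localhost().

#include <sstream>
#include <string>

#include "hdfs/hdfs.h"  // libhdfs3-style client header (path assumed); declares hdfsGetLastError()

// Hypothetical helper, not part of the patch: format an HDFS failure the way
// the reader/writer now do, using the client's last-error text.
static std::string hdfs_error_message(const std::string& action, const std::string& namenode,
                                      const std::string& path) {
    std::stringstream ss;
    ss << action << " failed."
       << " namenode:" << namenode << ", path:" << path
       << ", err: " << hdfsGetLastError();
    return ss.str();
}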
@@ -91,19 +91,17 @@ Status HdfsFileReader::open() {
         std::stringstream ss;
         ss << "open file failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << " namenode:" << _namenode << ", path:" << _path << ", err: " << strerror(errno);
+           << " namenode:" << _namenode << ", path:" << _path << ", err: " << hdfsGetLastError();
         return Status::InternalError(ss.str());
     }
-    LOG(INFO) << "open file, namenode:" << _namenode << ", path:" << _path;
+    VLOG_NOTICE << "open file, namenode:" << _namenode << ", path:" << _path;
     return seek(_current_offset);
 }

 void HdfsFileReader::close() {
     if (!closed()) {
         if (_hdfs_file != nullptr && _hdfs_fs != nullptr) {
-            std::stringstream ss;
-            ss << "close hdfs file: " << _namenode << _path;
-            LOG(INFO) << ss.str();
+            VLOG_NOTICE << "close hdfs file: " << _namenode << _path;
             //If the hdfs file was valid, the memory associated with it will
             // be freed at the end of this call, even if there was an I/O error
             hdfsCloseFile(_hdfs_fs, _hdfs_file);
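Besides the error text, routine open/close messages are demoted from LOG(INFO) to VLOG_NOTICE so they no longer land in the INFO log by default. A sketch of what that gating means in glog terms, assuming VLOG_NOTICE is a Doris alias for a glog verbose level (the exact level below is an assumption):

#include <string>

#include <glog/logging.h>

// Assumption for this sketch: Doris maps VLOG_NOTICE to a glog verbose level,
// e.g. VLOG(3); the define below is only a fallback so the example is self-contained.
#ifndef VLOG_NOTICE
#define VLOG_NOTICE VLOG(3)
#endif

void log_open(const std::string& namenode, const std::string& path) {
    // Emitted only when the process runs with a high enough --v setting,
    // unlike LOG(INFO), which is always written.
    VLOG_NOTICE << "open file, namenode:" << namenode << ", path:" << path;
}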
@@ -152,7 +150,7 @@ Status HdfsFileReader::readat(int64_t position, int64_t nbytes, int64_t* bytes_r
         std::stringstream ss;
         ss << "hdfsSeek failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")" << _namenode << _path
-           << ", err: " << strerror(errno);
+           << ", err: " << hdfsGetLastError();
         ;
         return Status::InternalError(ss.str());
     }
@@ -163,7 +161,7 @@ Status HdfsFileReader::readat(int64_t position, int64_t nbytes, int64_t* bytes_r
         std::stringstream ss;
         ss << "Read hdfs file failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")" << _namenode << _path
-           << ", err: " << strerror(errno);
+           << ", err: " << hdfsGetLastError();
         ;
         return Status::InternalError(ss.str());
     }
@@ -183,7 +181,7 @@ int64_t HdfsFileReader::size() {
         hdfsFileInfo* file_info = hdfsGetPathInfo(_hdfs_fs, _path.c_str());
         if (file_info == nullptr) {
             LOG(WARNING) << "get path info failed: " << _namenode << _path
-                         << ", err: " << strerror(errno);
+                         << ", err: " << hdfsGetLastError();
             ;
             close();
             return -1;
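For context on the size() hunk above, here is a self-contained sketch of reading a file's length with hdfsGetPathInfo and surfacing hdfsGetLastError() on failure; the wrapper function is hypothetical and the header path is assumed.

#include <cstdint>
#include <iostream>
#include <string>

#include "hdfs/hdfs.h"  // libhdfs3-style API assumed

// Hypothetical wrapper: return the file size in bytes, or -1 on failure.
int64_t hdfs_file_size(hdfsFS fs, const std::string& path) {
    hdfsFileInfo* info = hdfsGetPathInfo(fs, path.c_str());
    if (info == nullptr) {
        std::cerr << "get path info failed: " << path << ", err: " << hdfsGetLastError()
                  << std::endl;
        return -1;
    }
    int64_t size = info->mSize;
    hdfsFreeFileInfo(info, 1);  // hdfsGetPathInfo allocates a single entry
    return size;
}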
@@ -203,7 +201,7 @@ Status HdfsFileReader::seek(int64_t position) {
         std::stringstream ss;
         ss << "Seek to offset failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << " offset=" << position << ", err: " << strerror(errno);
+           << " offset=" << position << ", err: " << hdfsGetLastError();
         return Status::InternalError(ss.str());
     }
     return Status::OK();

@@ -60,14 +60,14 @@ Status HDFSWriter::open() {
     std::string hdfs_dir = hdfs_path.parent_path().string();
     exists = hdfsExists(_hdfs_fs, hdfs_dir.c_str());
     if (exists != 0) {
-        LOG(INFO) << "hdfs dir doesn't exist, create it: " << hdfs_dir;
+        VLOG_NOTICE << "hdfs dir doesn't exist, create it: " << hdfs_dir;
         int ret = hdfsCreateDirectory(_hdfs_fs, hdfs_dir.c_str());
         if (ret != 0) {
             std::stringstream ss;
             ss << "create dir failed. "
                << "(BE: " << BackendOptions::get_localhost() << ")"
                << " namenode: " << _namenode << " path: " << hdfs_dir
-               << ", err: " << strerror(errno);
+               << ", err: " << hdfsGetLastError();
             LOG(WARNING) << ss.str();
             return Status::InternalError(ss.str());
         }
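Putting the writer-side hunk together, here is a hedged sketch of the open-for-write flow it modifies: probe the parent directory with hdfsExists, create it with hdfsCreateDirectory if missing, then open the file, reporting hdfsGetLastError() at each step. The function is illustrative only, not Doris's actual HDFSWriter::open().

#include <fcntl.h>

#include <iostream>
#include <string>

#include "hdfs/hdfs.h"  // libhdfs3-style API assumed

// Hypothetical helper mirroring the writer's open path in the diff.
hdfsFile open_for_write(hdfsFS fs, const std::string& dir, const std::string& path) {
    if (hdfsExists(fs, dir.c_str()) != 0) {
        // Parent directory missing (or the check failed): try to create it.
        if (hdfsCreateDirectory(fs, dir.c_str()) != 0) {
            std::cerr << "create dir failed. path: " << dir << ", err: " << hdfsGetLastError()
                      << std::endl;
            return nullptr;
        }
    }
    hdfsFile file = hdfsOpenFile(fs, path.c_str(), O_WRONLY, 0, 0, 0);
    if (file == nullptr) {
        std::cerr << "open file failed. path: " << path << ", err: " << hdfsGetLastError()
                  << std::endl;
    }
    return file;
}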
@@ -78,11 +78,11 @@ Status HDFSWriter::open() {
         std::stringstream ss;
         ss << "open file failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << " namenode:" << _namenode << " path:" << _path << ", err: " << strerror(errno);
+           << " namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError();
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
-    LOG(INFO) << "open file. namenode:" << _namenode << ", path:" << _path;
+    VLOG_NOTICE << "open file. namenode:" << _namenode << ", path:" << _path;
     return Status::OK();
 }

@@ -96,7 +96,7 @@ Status HDFSWriter::write(const uint8_t* buf, size_t buf_len, size_t* written_len
         std::stringstream ss;
         ss << "write file failed. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << "namenode:" << _namenode << " path:" << _path << ", err: " << strerror(errno);
+           << "namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError();
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }
@@ -123,7 +123,7 @@ Status HDFSWriter::close() {
         std::stringstream ss;
         ss << "failed to flush hdfs file. "
            << "(BE: " << BackendOptions::get_localhost() << ")"
-           << "namenode:" << _namenode << " path:" << _path << ", err: " << strerror(errno);
+           << "namenode:" << _namenode << " path:" << _path << ", err: " << hdfsGetLastError();
         LOG(WARNING) << ss.str();
         return Status::InternalError(ss.str());
     }