Mirror of https://github.com/meilisearch/MeiliSearch, synced 2025-07-03 11:57:07 +02:00
move to our new S3 lib

parent: 6325cda74f
commit: 98b67f217a

9 changed files with 116 additions and 212 deletions
@@ -33,6 +33,8 @@ pub enum MeilisearchHttpError {
         .0.iter().map(|uid| format!("\"{uid}\"")).collect::<Vec<_>>().join(", "), .0.len()
     )]
     SwapIndexPayloadWrongLength(Vec<IndexUid>),
+    #[error("S3 Error: {0}")]
+    S3Error(#[from] strois::Error),
     #[error(transparent)]
     IndexUid(#[from] IndexUidFormatError),
     #[error(transparent)]
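Note: the added variant leans on thiserror's `#[from]` attribute, which generates a `From<strois::Error>` impl so call sites can propagate S3 failures with `?`. A minimal, self-contained sketch of that pattern follows; `BackendError` is a stand-in for `strois::Error`, not the real type:

    use thiserror::Error;

    // Stand-in for strois::Error, so the sketch compiles on its own.
    #[derive(Error, Debug)]
    #[error("s3 backend failure")]
    struct BackendError;

    #[derive(Error, Debug)]
    enum HttpError {
        #[error("S3 Error: {0}")]
        S3(#[from] BackendError),
    }

    // A hypothetical fallible backend call.
    fn backend_call() -> Result<Vec<u8>, BackendError> {
        Err(BackendError)
    }

    fn handler() -> Result<Vec<u8>, HttpError> {
        // `?` converts BackendError into HttpError::S3 via the generated From impl.
        let bytes = backend_call()?;
        Ok(bytes)
    }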
@@ -65,6 +67,7 @@ impl ErrorCode for MeilisearchHttpError {
             MeilisearchHttpError::InvalidExpression(_, _) => Code::InvalidSearchFilter,
             MeilisearchHttpError::PayloadTooLarge(_) => Code::PayloadTooLarge,
             MeilisearchHttpError::SwapIndexPayloadWrongLength(_) => Code::InvalidSwapIndexes,
+            MeilisearchHttpError::S3Error(_) => Code::S3Error,
             MeilisearchHttpError::IndexUid(e) => e.error_code(),
             MeilisearchHttpError::SerdeJson(_) => Code::Internal,
             MeilisearchHttpError::HeedError(_) => Code::Internal,
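Note: the match keeps the existing dispatch shape: a wrapper variant either maps to a fixed `Code` (as the new `S3Error` arm does) or delegates to the wrapped error's own `error_code()` (as `IndexUid` does). An illustrative reduction of that shape, with all names simplified and nothing taken from the real crate:

    #[derive(Debug)]
    enum Code { S3Error, InvalidIndexUid, Internal }

    trait ErrorCode {
        fn error_code(&self) -> Code;
    }

    struct UidError;

    impl ErrorCode for UidError {
        fn error_code(&self) -> Code { Code::InvalidIndexUid }
    }

    enum HttpError {
        S3(String),
        IndexUid(UidError),
        SerdeJson(String),
    }

    impl ErrorCode for HttpError {
        fn error_code(&self) -> Code {
            match self {
                HttpError::S3(_) => Code::S3Error,
                HttpError::IndexUid(e) => e.error_code(), // defer to the inner error
                HttpError::SerdeJson(_) => Code::Internal,
            }
        }
    }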
@@ -30,6 +30,7 @@ use extractors::payload::PayloadConfig;
 use http::header::CONTENT_TYPE;
 use index_scheduler::{IndexScheduler, IndexSchedulerOptions};
 use log::error;
+use strois::Client;
 use meilisearch_auth::AuthController;
 use meilisearch_types::milli::documents::{DocumentsBatchBuilder, DocumentsBatchReader};
 use meilisearch_types::milli::update::{IndexDocumentsConfig, IndexDocumentsMethod};
@@ -39,8 +40,6 @@ use meilisearch_types::versioning::{check_version_file, create_version_file};
 use meilisearch_types::{compression, milli, VERSION_FILE_NAME};
 pub use option::Opt;
 use option::ScheduleSnapshot;
-use s3::creds::Credentials;
-use s3::{Bucket, Region};
 use zookeeper::ZooKeeper;

 use crate::error::MeilisearchHttpError;
@@ -246,19 +245,16 @@ fn open_or_create_database_unchecked(
         zookeeper: zookeeper.clone(),
         s3: opt.s3_url.as_ref().map(|url| {
             Arc::new(
-                Bucket::new(
-                    opt.s3_bucket.as_deref().unwrap(),
-                    Region::Custom { region: opt.s3_region.clone(), endpoint: url.clone() },
-                    Credentials {
-                        access_key: opt.s3_access_key.clone(),
-                        secret_key: opt.s3_secret_key.clone(),
-                        security_token: opt.s3_security_token.clone(),
-                        session_token: None,
-                        expiration: None,
-                    },
-                )
-                .unwrap()
-                .with_path_style(),
+                Client::builder(url)
+                    .unwrap()
+                    .key(opt.s3_access_key.as_ref().expect("Need s3 key to work").clone())
+                    .secret(opt.s3_secret_key.as_ref().expect("Need s3 secret to work").clone())
+                    .maybe_token(opt.s3_security_token.clone())
+                    .build()
+                    .bucket(opt.s3_bucket.as_ref().expect("Need an s3 bucket to work"))
+                    .unwrap()
+                    .get_or_create()
+                    .unwrap(),
             )
         }),
     }))
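Note: the rust-s3 setup through `Bucket::new` plus a `Credentials` struct is replaced by strois's fallible builder chain, ending in `get_or_create()` so the bucket exists on first boot. The `maybe_token` setter takes an `Option`, so the optional `s3_security_token` can be forwarded without an `if let` at the call site. A simplified stand-in for that builder shape (illustrative only, not the real strois API):

    struct Client {
        key: String,
        secret: String,
        token: Option<String>,
    }

    #[derive(Default)]
    struct ClientBuilder {
        key: Option<String>,
        secret: Option<String>,
        token: Option<String>,
    }

    impl ClientBuilder {
        fn key(mut self, key: String) -> Self { self.key = Some(key); self }
        fn secret(mut self, secret: String) -> Self { self.secret = Some(secret); self }
        // Accepts an Option so optional config flows through unchanged.
        fn maybe_token(mut self, token: Option<String>) -> Self { self.token = token; self }
        fn build(self) -> Client {
            Client {
                key: self.key.expect("Need s3 key to work"),
                secret: self.secret.expect("Need s3 secret to work"),
                token: self.token,
            }
        }
    }

    fn main() {
        let client = ClientBuilder::default()
            .key("ACCESS_KEY".into())
            .secret("SECRET_KEY".into())
            .maybe_token(None)
            .build();
        assert!(client.token.is_none());
        println!("built client for key {}", client.key);
        let _ = client.secret;
    }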
@@ -413,10 +413,11 @@ async fn document_addition(
     if let Some(s3) = s3 {
         update_file.seek(SeekFrom::Start(0)).unwrap();
         let mut reader = BufReader::new(&*update_file);
-        match s3.put_object_stream(&mut reader, format!("/update-files/{}", uuid)) {
-            Ok(_) | Err(s3::error::S3Error::Http(_, _)) => (),
-            Err(e) => panic!("Error {}", e),
-        }
+        s3.put_object_multipart(
+            format!("update-files/{}", uuid),
+            &mut reader,
+            50 * 1024 * 1024,
+        )?;
     }

     // we NEED to persist the file here because we moved the `udpate_file` in another task.
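Note: the old code streamed the whole file with `put_object_stream`, silently swallowed HTTP errors, and panicked on everything else; the new `put_object_multipart` call uploads the rewound file in 50 MiB parts (50 * 1024 * 1024 bytes) and propagates failures with `?` through the new `S3Error` variant. A sketch of how a multipart upload consumes a rewound reader in fixed-size parts; `upload_part` is a hypothetical stand-in for the per-part S3 request:

    use std::io::{Cursor, Read};

    // Feed a reader to `upload_part` in parts of `part_size` bytes; only the
    // final part may be short, mirroring how multipart uploads consume a stream.
    fn for_each_part<R: Read>(
        reader: &mut R,
        part_size: usize,
        mut upload_part: impl FnMut(&[u8]),
    ) -> std::io::Result<()> {
        let mut buf = vec![0u8; part_size];
        loop {
            // Fill the buffer completely unless the stream ends first.
            let mut filled = 0;
            while filled < part_size {
                let n = reader.read(&mut buf[filled..])?;
                if n == 0 { break; }
                filled += n;
            }
            if filled == 0 { break; } // nothing left to upload
            upload_part(&buf[..filled]);
            if filled < part_size { break; } // a short part means end of stream
        }
        Ok(())
    }

    fn main() -> std::io::Result<()> {
        let mut data = Cursor::new(vec![1u8; 10]);
        // With part_size = 4, this yields parts of 4, 4, and 2 bytes.
        for_each_part(&mut data, 4, |part| println!("part of {} bytes", part.len()))
    }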