
Better memory management (#59)

* Better memory usage

* Pass args by value (and therefore pass ownership) to avoid
unneeded cloning of data (see the sketch after this list).
* Pass key/value by ownership to the database mod
* Use VecDeque instead of slices (&[Bytes]) to deconstruct the args easily
* Use Bytes for Value::Blob. Introduce Value::BlobRw, which is identical
but uses BytesMut internally. Value::Blob is converted to Value::BlobRw
on demand, and the conversion is one-way (there is no BlobRw-to-Blob
conversion).

* Fixed KEYS, PEXPIRETIME and EXPIRETIME calls
* Fixed warnings reported by clippy
* Fixed commands:
   * GETEX
   * INCRBYFLOAT
   * EXPIREAT
   * PEXPIREAT
   * PING
   * SMOVE
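
The core of the change is the handler signature: commands now receive an owned `VecDeque<Bytes>` (with the command name already stripped) instead of a borrowed `&[Bytes]` slice. A minimal sketch of that pattern, using hypothetical `Db`/`set_*` names rather than the actual microredis API:

```rust
// Minimal sketch (not the real handler API) of the ownership change:
// handlers take an owned VecDeque<Bytes> instead of &[Bytes], so keys and
// values can be moved into the database layer without cloning buffers.
use bytes::Bytes;
use std::collections::{HashMap, VecDeque};

#[derive(Default)]
struct Db {
    // Hypothetical store; the real database module is more involved.
    map: HashMap<Bytes, Bytes>,
}

impl Db {
    // Takes key and value by ownership, as the commit message describes.
    fn set(&mut self, key: Bytes, value: Bytes) {
        self.map.insert(key, value);
    }
}

// Old style (for contrast): borrowing a slice forces clones before insert.
fn set_borrowed(db: &mut Db, args: &[Bytes]) {
    db.set(args[1].clone(), args[2].clone());
}

// New style: popping from the front of the VecDeque transfers ownership.
fn set_owned(db: &mut Db, mut args: VecDeque<Bytes>) -> Option<()> {
    let key = args.pop_front()?;
    let value = args.pop_front()?;
    db.set(key, value);
    Some(())
}

fn main() {
    let mut db = Db::default();
    set_borrowed(
        &mut db,
        &[Bytes::from("SET"), Bytes::from("k1"), Bytes::from("v1")],
    );
    set_owned(&mut db, VecDeque::from([Bytes::from("k2"), Bytes::from("v2")]))
        .expect("missing key/value");
    assert_eq!(db.map.len(), 2);
}
```

Popping from the front moves each `Bytes` handle out of the queue, so the database layer can store it without copying the underlying buffer.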
César D. Rodas · 1 year ago · commit 91622299e8

+ 2 - 2
Cargo.lock

@@ -137,9 +137,9 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610"
 
 [[package]]
 name = "bytes"
-version = "1.1.0"
+version = "1.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8"
+checksum = "89b2fd2a0dcf38d7971e2194b6b6eebab45ae01067456a7fd93d5547a61b70be"
 
 [[package]]
 name = "cfg-if"

+ 1 - 1
Cargo.toml

@@ -7,7 +7,7 @@ edition = "2018"
 # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
 
 [dependencies]
-bytes = "1"
+bytes = "1.4"
 byteorder = "1.2.2"
 redis-zero-protocol-parser = "^0.3"
 redis-config-parser = {path = "redis-config-parser"}
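
The `bytes` bump to 1.4 goes along with the `Value::Blob`/`Value::BlobRw` split described in the commit message. A small standalone illustration of the two buffer types involved (plain `bytes` API, nothing microredis-specific):

```rust
// Bytes is an immutable, reference-counted buffer (cloning only bumps a
// counter), while BytesMut owns a writable buffer and can be frozen back
// into Bytes cheaply. Turning an arbitrary Bytes into a BytesMut generally
// copies, which matches the one-way Blob -> BlobRw note above.
use bytes::{BufMut, Bytes, BytesMut};

fn main() {
    // Immutable, cheap to clone.
    let blob = Bytes::from_static(b"hello");
    let alias = blob.clone(); // O(1): same underlying buffer
    assert_eq!(blob, alias);

    // Read/write variant: copy into a BytesMut and mutate in place.
    let mut rw = BytesMut::from(&blob[..]); // this copies the data
    rw.put_slice(b" world");

    // Freezing a BytesMut into Bytes is cheap (no copy).
    let frozen: Bytes = rw.freeze();
    assert_eq!(&frozen[..], b"hello world");
}
```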

+ 24 - 1
Makefile

@@ -6,7 +6,30 @@ build:
 	cargo build --release
 test-single: build
 	./runtest  --clients 1 \
-		--single unit/other \
+		--skipunit unit/dump \
+		--skipunit unit/auth \
+		--skipunit unit/protocol \
+		--skipunit unit/scan \
+		--skipunit unit/info \
+		--skipunit unit/type/zset \
+		--skipunit unit/bitops \
+		--skipunit unit/type/stream \
+		--skipunit unit/type/stream-cgroups \
+		--skipunit unit/sort \
+		--skipunit unit/aofrw \
+		--skipunit unit/acl \
+		--skipunit unit/latency-monitor \
+		--skipunit unit/slowlog \
+		--skipunit unit/scripting \
+		--skipunit unit/introspection \
+		--skipunit unit/introspection-2 \
+		--skipunit unit/bitfield \
+		--skipunit unit/geo \
+		--skipunit unit/pause \
+		--skipunit unit/hyperloglog \
+		--skipunit unit/lazyfree \
+		--skipunit unit/tracking \
+		--skipunit unit/querybuf \
 		--ignore-encoding \
 		--tags -needs:repl \
 		--tags -leaks \

+ 4 - 4
redis-config-parser/src/parser.rs

@@ -1,6 +1,6 @@
 use std::borrow::Cow;
 
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, Eq, PartialEq, Clone)]
 pub enum Error {
     /// The data is incomplete. This it not an error per-se, but rather a
     /// mechanism to let the caller know they should keep buffering data before
@@ -8,14 +8,14 @@ pub enum Error {
     Partial,
 }
 
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, Eq, PartialEq, Clone)]
 pub enum Args<'a> {
     None,
     Single(Cow<'a, str>),
     Multiple(Vec<Cow<'a, str>>),
 }
 
-#[derive(Debug, PartialEq, Clone)]
+#[derive(Debug, Eq, PartialEq, Clone)]
 pub struct ConfigValue<'a> {
     pub name: Cow<'a, str>,
     pub args: Args<'a>,
@@ -59,7 +59,7 @@ macro_rules! read_until {
 
 pub fn parse(bytes: &'_ [u8]) -> Result<(&'_ [u8], ConfigValue<'_>), Error> {
     let bytes = skip!(bytes, vec![b' ', b'\t', b'\r', b'\n']);
-    let bytes = if bytes.get(0) == Some(&b'#') {
+    let bytes = if bytes.first() == Some(&b'#') {
         // The entire line is a comment, skip the whole line
         let (bytes, _) = read_until!(bytes, vec![b'\n']);
         skip!(bytes, vec![b' ', b'\t', b'\r', b'\n'])
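
The parser changes are mechanical clippy fixes: deriving `Eq` next to `PartialEq`, and using `slice.first()` instead of `slice.get(0)`. A tiny standalone illustration (not the parser itself):

```rust
// Standalone illustration of the two clippy-driven changes above.
#[derive(Debug, Eq, PartialEq, Clone)] // deriving Eq alongside PartialEq
enum Token {
    Comment,
    Other(u8),
}

fn classify(bytes: &[u8]) -> Token {
    // clippy::get_first: `bytes.first()` is the idiomatic form of `bytes.get(0)`.
    if bytes.first() == Some(&b'#') {
        Token::Comment
    } else {
        Token::Other(bytes.first().copied().unwrap_or(0))
    }
}

fn main() {
    assert_eq!(classify(b"# comment"), Token::Comment);
    assert_eq!(classify(b"key value"), Token::Other(b'k'));
}
```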

+ 21 - 18
src/cmd/client.rs

@@ -1,24 +1,24 @@
 //!  # Client-group command handlers
-
 use crate::{
     connection::{Connection, ConnectionStatus, UnblockReason},
     error::Error,
     value::{bytes_to_int, bytes_to_number, Value},
 };
 use bytes::Bytes;
-use std::sync::Arc;
+use std::{collections::VecDeque, sync::Arc};
 
 /// "client" command handler
 ///
 /// Documentation:
 ///  * <https://redis.io/commands/client-id>
-pub async fn client(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let sub = String::from_utf8_lossy(&args[1]);
+pub async fn client(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let sub = args.pop_front().ok_or(Error::Syntax)?;
+    let sub = String::from_utf8_lossy(&sub);
 
     let expected = match sub.to_lowercase().as_str() {
-        "setname" => Some(3),
+        "setname" => Some(1),
         "unblock" => None,
-        _ => Some(2),
+        _ => Some(0),
     };
 
     if let Some(expected) = expected {
@@ -41,7 +41,7 @@ pub async fn client(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
             Ok(list_client.into())
         }
         "unblock" => {
-            let reason = match args.get(3) {
+            let reason = match args.get(1) {
                 Some(x) => match String::from_utf8_lossy(&x).to_uppercase().as_str() {
                     "TIMEOUT" => UnblockReason::Timeout,
                     "ERROR" => UnblockReason::Error,
@@ -51,7 +51,7 @@ pub async fn client(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
             };
             let other_conn = match conn
                 .all_connections()
-                .get_by_conn_id(bytes_to_int(&args[2])?)
+                .get_by_conn_id(bytes_to_int(&args[0])?)
             {
                 Some(conn) => conn,
                 None => return Ok(0.into()),
@@ -69,7 +69,7 @@ pub async fn client(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
             })
         }
         "setname" => {
-            let name = String::from_utf8_lossy(&args[2]).to_string();
+            let name = String::from_utf8_lossy(&args[0]).to_string();
             conn.set_name(name);
             Ok(Value::Ok)
         }
@@ -84,27 +84,30 @@ pub async fn client(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 ///
 /// Documentation:
 ///  * <https://redis.io/commands/echo>
-pub async fn echo(_conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(Value::new(&args[1]))
+pub async fn echo(_conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    Ok(Value::Blob(args.pop_front().ok_or(Error::Syntax)?))
 }
 
 /// Select the Redis logical database having the specified zero-based numeric
 /// index. New connections always use the database 0.
-pub async fn select(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    conn.selectdb(bytes_to_number(&args[1])?)
+pub async fn select(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    conn.selectdb(bytes_to_number(&args[0])?)
 }
 
 /// "ping" command handler
 ///
 /// Documentation:
 ///  * <https://redis.io/commands/ping>
-pub async fn ping(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn ping(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
     if conn.status() == ConnectionStatus::Pubsub {
-        return Ok(Value::Array(vec!["pong".into(), args.get(1).into()]));
+        return Ok(Value::Array(vec![
+            "pong".into(),
+            args.pop_front().map(|p| Value::Blob(p)).unwrap_or_default(),
+        ]));
     }
     match args.len() {
-        1 => Ok(Value::String("PONG".to_owned())),
-        2 => Ok(Value::new(&args[1])),
+        0 => Ok(Value::String("PONG".to_owned())),
+        1 => Ok(Value::Blob(args.pop_front().ok_or(Error::Syntax)?)),
         _ => Err(Error::InvalidArgsCount("ping".to_owned())),
     }
 }
@@ -113,7 +116,7 @@ pub async fn ping(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 ///
 /// Documentation:
 ///  * <https://redis.io/commands/reset>
-pub async fn reset(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn reset(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.reset();
     Ok(Value::String("RESET".to_owned()))
 }
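
A hedged sketch of why the indices shift in these handlers: the dispatcher (illustrative name and types, not the real microredis code) pops the command name off the front of the frame before calling the handler, so what used to be `args[1]` becomes `args[0]` and the expected argument counts drop by one:

```rust
// Illustrative dispatcher: the command name is consumed before the handler
// runs, so handlers only ever see their own arguments.
use bytes::Bytes;
use std::collections::VecDeque;

fn dispatch(mut frame: VecDeque<Bytes>) -> Result<String, &'static str> {
    let command = frame.pop_front().ok_or("empty frame")?;
    match command.to_ascii_uppercase().as_slice() {
        b"PING" => ping(frame),
        _ => Err("unknown command"),
    }
}

// Mirrors the new PING handling: zero args -> PONG, one arg -> echo it back.
fn ping(mut args: VecDeque<Bytes>) -> Result<String, &'static str> {
    match args.len() {
        0 => Ok("PONG".to_owned()),
        1 => Ok(String::from_utf8_lossy(&args.pop_front().unwrap()).into_owned()),
        _ => Err("wrong number of arguments for 'ping' command"),
    }
}

fn main() {
    let frame = VecDeque::from([Bytes::from("PING"), Bytes::from("hello")]);
    assert_eq!(dispatch(frame).unwrap(), "hello");
}
```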

+ 132 - 77
src/cmd/hash.rs

@@ -4,12 +4,12 @@ use crate::{
     connection::Connection,
     error::Error,
     value::Value,
-    value::{bytes_to_number, float::Float},
+    value::{self, bytes_to_number, float::Float},
 };
 use bytes::Bytes;
 use rand::Rng;
 use std::{
-    collections::{BTreeMap, HashMap},
+    collections::{BTreeMap, HashMap, VecDeque},
     convert::TryFrom,
     ops::AddAssign,
     str::FromStr,
@@ -18,17 +18,18 @@ use std::{
 /// Removes the specified fields from the hash stored at key. Specified fields that do not exist
 /// within this hash are ignored. If key does not exist, it is treated as an empty hash and this
 /// command returns 0.
-pub async fn hdel(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hdel(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
     let mut is_empty = false;
+    let key = args.pop_front().ok_or(Error::Syntax)?;
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::Hash(h) => {
                 let mut h = h.write();
                 let mut total: i64 = 0;
 
-                for key in (&args[2..]).iter() {
-                    if h.remove(key).is_some() {
+                for key in args.into_iter() {
+                    if h.remove(&key).is_some() {
                         total += 1;
                     }
                 }
@@ -43,20 +44,20 @@ pub async fn hdel(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
     )?;
 
     if is_empty {
-        let _ = conn.db().del(&[args[1].clone()]);
+        let _ = conn.db().del(&[key]);
     } else {
-        conn.db().bump_version(&args[1]);
+        conn.db().bump_version(&key);
     }
 
     Ok(result)
 }
 
 /// Returns if field is an existing field in the hash stored at key.
-pub async fn hexists(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hexists(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
-            Value::Hash(h) => Ok(if h.read().get(&args[2]).is_some() {
+            Value::Hash(h) => Ok(if h.read().get(&args[1]).is_some() {
                 1.into()
             } else {
                 0.into()
@@ -68,13 +69,13 @@ pub async fn hexists(conn: &Connection, args: &[Bytes]) -> Result<Value, Error>
 }
 
 /// Returns the value associated with field in the hash stored at key.
-pub async fn hget(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hget(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Hash(h) => Ok(h
                 .read()
-                .get(&args[2])
+                .get(&args[1])
                 .map(|v| Value::new(v))
                 .unwrap_or_default()),
             _ => Err(Error::WrongType),
@@ -85,16 +86,16 @@ pub async fn hget(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 
 /// Returns all fields and values of the hash stored at key. In the returned value, every field
 /// name is followed by its value, so the length of the reply is twice the size of the hash.
-pub async fn hgetall(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hgetall(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Hash(h) => {
                 let mut ret = vec![];
 
                 for (key, value) in h.read().iter() {
-                    ret.push(Value::new(&key));
-                    ret.push(Value::new(&value));
+                    ret.push(Value::new(key));
+                    ret.push(Value::new(value));
                 }
 
                 Ok(ret.into())
@@ -109,12 +110,12 @@ pub async fn hgetall(conn: &Connection, args: &[Bytes]) -> Result<Value, Error>
 /// specified increment. If the increment value is negative, the result is to have the hash field
 /// value decremented instead of incremented. If the field does not exist, it is set to 0 before
 /// performing the operation.
-pub async fn hincrby_int(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hincrby_int(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     let result = conn
         .db()
-        .hincrby::<i64>(&args[1], &args[2], &args[3], "an integer")?;
+        .hincrby::<i64>(&args[0], &args[1], &args[2], "an integer")?;
 
-    conn.db().bump_version(&args[1]);
+    conn.db().bump_version(&args[0]);
 
     Ok(result)
 }
@@ -123,26 +124,26 @@ pub async fn hincrby_int(conn: &Connection, args: &[Bytes]) -> Result<Value, Err
 /// specified increment. If the increment value is negative, the result is to have the hash field
 /// value decremented instead of incremented. If the field does not exist, it is set to 0 before
 /// performing the operation.
-pub async fn hincrby_float(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hincrby_float(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     let result = conn
         .db()
-        .hincrby::<Float>(&args[1], &args[2], &args[3], "a float")?;
+        .hincrby::<Float>(&args[0], &args[1], &args[2], "a float")?;
 
-    conn.db().bump_version(&args[1]);
+    conn.db().bump_version(&args[0]);
 
     Ok(result)
 }
 
 /// Returns all field names in the hash stored at key.
-pub async fn hkeys(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hkeys(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Hash(h) => {
                 let mut ret = vec![];
 
                 for key in h.read().keys() {
-                    ret.push(Value::new(&key));
+                    ret.push(Value::new(key));
                 }
 
                 Ok(ret.into())
@@ -154,9 +155,9 @@ pub async fn hkeys(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 }
 
 /// Returns the number of fields contained in the hash stored at key.
-pub async fn hlen(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hlen(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Hash(h) => Ok(h.read().len().into()),
             _ => Err(Error::WrongType),
@@ -166,14 +167,15 @@ pub async fn hlen(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 }
 
 /// Returns the values associated with the specified fields in the hash stored at key.
-pub async fn hmget(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hmget(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
     conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::Hash(h) => {
                 let h = h.read();
 
-                Ok((&args[2..])
+                Ok(args
                     .iter()
                     .map(|key| h.get(key).map(|v| Value::new(v)).unwrap_or_default())
                     .collect::<Vec<Value>>()
@@ -182,7 +184,7 @@ pub async fn hmget(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
             _ => Err(Error::WrongType),
         },
         || {
-            Ok((&args[2..])
+            Ok(args
                 .iter()
                 .map(|_| Value::Null)
                 .collect::<Vec<Value>>()
@@ -192,15 +194,15 @@ pub async fn hmget(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 }
 
 /// Returns random keys (or values) from a hash
-pub async fn hrandfield(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hrandfield(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     let (count, with_values) = match args.len() {
-        2 => (None, false),
-        3 => (Some(bytes_to_number::<i64>(&args[2])?), false),
-        4 => {
-            if !(check_arg!(args, 3, "WITHVALUES")) {
+        1 => (None, false),
+        2 => (Some(bytes_to_number::<i64>(&args[1])?), false),
+        3 => {
+            if !(check_arg!(args, 2, "WITHVALUES")) {
                 return Err(Error::Syntax);
             }
-            (Some(bytes_to_number::<i64>(&args[2])?), true)
+            (Some(bytes_to_number::<i64>(&args[1])?), true)
         }
         _ => return Err(Error::InvalidArgsCount("hrandfield".to_owned())),
     };
@@ -212,7 +214,7 @@ pub async fn hrandfield(conn: &Connection, args: &[Bytes]) -> Result<Value, Erro
     };
 
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Hash(h) => {
                 let mut ret = vec![];
@@ -232,17 +234,17 @@ pub async fn hrandfield(conn: &Connection, args: &[Bytes]) -> Result<Value, Erro
                 i = 0;
                 for val in rand_sorted.values() {
                     if single {
-                        return Ok(Value::new(&val.0));
+                        return Ok(Value::new(val.0));
                     }
 
                     if i == count {
                         break;
                     }
 
-                    ret.push(Value::new(&val.0));
+                    ret.push(Value::new(val.0));
 
                     if with_values {
-                        ret.push(Value::new(&val.1));
+                        ret.push(Value::new(val.1));
                     }
 
                     i += 1;
@@ -258,47 +260,99 @@ pub async fn hrandfield(conn: &Connection, args: &[Bytes]) -> Result<Value, Erro
 
 /// Sets field in the hash stored at key to value. If key does not exist, a new key holding a hash
 /// is created. If field already exists in the hash, it is overwritten.
-pub async fn hset(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let is_hmset = check_arg!(args, 0, "HMSET");
+pub async fn hmset(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
     if args.len() % 2 == 1 {
         return Err(Error::InvalidArgsCount("hset".to_owned()));
     }
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
+        |v| match v {
+            Value::Hash(h) => {
+                let mut h = h.write();
+                let mut args = args.clone();
+                loop {
+                    if args.is_empty() {
+                        break;
+                    }
+                    let key = args.pop_front().ok_or(Error::Syntax)?;
+                    let value = args.pop_front().ok_or(Error::Syntax)?;
+                    h.insert(key, value);
+                }
+                Ok(Value::Ok)
+            }
+            _ => Err(Error::WrongType),
+        },
+        || {
+            #[allow(clippy::mutable_key_type)]
+            let mut h = HashMap::new();
+            let mut args = args.clone();
+            loop {
+                if args.is_empty() {
+                    break;
+                }
+                let key = args.pop_front().ok_or(Error::Syntax)?;
+                let value = args.pop_front().ok_or(Error::Syntax)?;
+                h.insert(key, value);
+            }
+            let len = h.len();
+            conn.db().set(key.clone(), h.into(), None);
+            Ok(Value::Ok)
+        },
+    )?;
+
+    conn.db().bump_version(&key);
+
+    Ok(result)
+}
+
+/// Sets field in the hash stored at key to value. If key does not exist, a new key holding a hash
+/// is created. If field already exists in the hash, it is overwritten.
+pub async fn hset(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    if args.len() % 2 == 1 {
+        return Err(Error::InvalidArgsCount("hset".to_owned()));
+    }
+    let result = conn.db().get_map_or(
+        &key,
         |v| match v {
             Value::Hash(h) => {
                 let mut h = h.write();
                 let mut e: i64 = 0;
-                for i in (2..args.len()).step_by(2) {
-                    if h.insert(args[i].clone(), args[i + 1].clone()).is_none() {
+                let mut args = args.clone();
+                loop {
+                    if args.is_empty() {
+                        break;
+                    }
+                    let key = args.pop_front().ok_or(Error::Syntax)?;
+                    let value = args.pop_front().ok_or(Error::Syntax)?;
+                    if h.insert(key, value).is_none() {
                         e += 1;
                     }
                 }
-                if is_hmset {
-                    Ok(Value::Ok)
-                } else {
-                    Ok(e.into())
-                }
+                Ok(e.into())
             }
             _ => Err(Error::WrongType),
         },
         || {
             #[allow(clippy::mutable_key_type)]
             let mut h = HashMap::new();
-            for i in (2..args.len()).step_by(2) {
-                h.insert(args[i].clone(), args[i + 1].clone());
+            let mut args = args.clone();
+            loop {
+                if args.is_empty() {
+                    break;
+                }
+                let key = args.pop_front().ok_or(Error::Syntax)?;
+                let value = args.pop_front().ok_or(Error::Syntax)?;
+                h.insert(key, value);
             }
             let len = h.len();
-            conn.db().set(&args[1], h.into(), None);
-            if is_hmset {
-                Ok(Value::Ok)
-            } else {
-                Ok(len.into())
-            }
+            conn.db().set(key.clone(), h.into(), None);
+            Ok(len.into())
         },
     )?;
 
-    conn.db().bump_version(&args[1]);
+    conn.db().bump_version(&key);
 
     Ok(result)
 }
@@ -306,17 +360,20 @@ pub async fn hset(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// Sets field in the hash stored at key to value, only if field does not yet exist. If key does
 /// not exist, a new key holding a hash is created. If field already exists, this operation has no
 /// effect.
-pub async fn hsetnx(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hsetnx(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let sub_key = args.pop_front().ok_or(Error::Syntax)?;
+    let value = args.pop_front().ok_or(Error::Syntax)?;
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::Hash(h) => {
                 let mut h = h.write();
 
-                if h.get(&args[2]).is_some() {
+                if h.get(&sub_key).is_some() {
                     Ok(0.into())
                 } else {
-                    h.insert(args[2].clone(), args[3].clone());
+                    h.insert(sub_key.clone(), value.clone());
                     Ok(1.into())
                 }
             }
@@ -325,17 +382,15 @@ pub async fn hsetnx(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
         || {
             #[allow(clippy::mutable_key_type)]
             let mut h = HashMap::new();
-            for i in (2..args.len()).step_by(2) {
-                h.insert(args[i].clone(), args[i + 1].clone());
-            }
+            h.insert(sub_key.clone(), value.clone());
             let len = h.len();
-            conn.db().set(&args[1], h.into(), None);
+            conn.db().set(key.clone(), h.into(), None);
             Ok(len.into())
         },
     )?;
 
     if result == Value::Integer(1) {
-        conn.db().bump_version(&args[1]);
+        conn.db().bump_version(&key);
     }
 
     Ok(result)
@@ -343,13 +398,13 @@ pub async fn hsetnx(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 
 /// Returns the string length of the value associated with field in the hash stored at key. If the
 /// key or the field do not exist, 0 is returned.
-pub async fn hstrlen(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hstrlen(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Hash(h) => Ok(h
                 .read()
-                .get(&args[2])
+                .get(&args[1])
                 .map(|v| v.len())
                 .unwrap_or_default()
                 .into()),
@@ -360,15 +415,15 @@ pub async fn hstrlen(conn: &Connection, args: &[Bytes]) -> Result<Value, Error>
 }
 
 /// Returns all values in the hash stored at key.
-pub async fn hvals(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn hvals(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Hash(h) => {
                 let mut ret = vec![];
 
                 for value in h.read().values() {
-                    ret.push(Value::new(&value));
+                    ret.push(Value::new(value));
                 }
 
                 Ok(ret.into())
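
For the `HSET`/`HMSET` handlers above, the remaining args are consumed as field/value pairs by popping two at a time. An illustrative standalone helper (not from the crate) for that pair-draining step:

```rust
// Drain the remaining args two at a time into a field -> value map,
// rejecting an odd number of arguments the way the handlers above do.
use bytes::Bytes;
use std::collections::{HashMap, VecDeque};

fn pairs_to_map(mut args: VecDeque<Bytes>) -> Result<HashMap<Bytes, Bytes>, &'static str> {
    if args.len() % 2 == 1 {
        return Err("wrong number of arguments");
    }
    let mut map = HashMap::new();
    while let Some(field) = args.pop_front() {
        let value = args.pop_front().ok_or("missing value")?;
        map.insert(field, value);
    }
    Ok(map)
}

fn main() {
    let args = VecDeque::from([
        Bytes::from("f1"),
        Bytes::from("v1"),
        Bytes::from("f2"),
        Bytes::from("v2"),
    ]);
    let map = pairs_to_map(args).unwrap();
    assert_eq!(map.get(&Bytes::from("f1")), Some(&Bytes::from("v1")));
    assert_eq!(map.len(), 2);
}
```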

+ 175 - 78
src/cmd/key.rs

@@ -11,6 +11,7 @@ use crate::{
 };
 use bytes::Bytes;
 use std::{
+    collections::VecDeque,
     convert::TryInto,
     str::FromStr,
     time::{SystemTime, UNIX_EPOCH},
@@ -26,22 +27,24 @@ use tokio::time::{Duration, Instant};
 ///
 /// The command returns an error when the destination key already exists. The
 /// REPLACE option removes the destination key before copying the value to it.
-pub async fn copy(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let mut skip = 3;
-    let target_db = if args.len() > 4 && check_arg!(args, 3, "DB") {
-        skip += 2;
+pub async fn copy(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let source = args.pop_front().ok_or(Error::Syntax)?;
+    let destination = args.pop_front().ok_or(Error::Syntax)?;
+    let target_db = if !args.is_empty() && check_arg!(args, 0, "DB") {
+        let _ = args.pop_front();
+        let db = args.pop_front().ok_or(Error::Syntax)?;
         Some(
             conn.all_connections()
                 .get_databases()
-                .get(bytes_to_int(&args[4])?)?
+                .get(bytes_to_int(&db)?)?
                 .clone(),
         )
     } else {
         None
     };
     let replace = match args
-        .get(skip)
-        .map(|m| String::from_utf8_lossy(m).to_uppercase())
+        .pop_front()
+        .map(|m| String::from_utf8_lossy(&m).to_uppercase())
     {
         Some(value) => {
             if value == "REPLACE" {
@@ -54,7 +57,7 @@ pub async fn copy(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
     };
     let result = if conn
         .db()
-        .copy(&args[1], &args[2], replace.into(), target_db)?
+        .copy(source, destination, replace.into(), target_db)?
     {
         1
     } else {
@@ -65,77 +68,145 @@ pub async fn copy(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 }
 
 /// Removes the specified keys. A key is ignored if it does not exist.
-pub async fn del(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().del(&args[1..]))
+pub async fn del(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let keys = args.into_iter().collect::<Vec<_>>();
+    Ok(conn.db().del(&keys))
 }
 
 /// Returns if key exists.
-pub async fn exists(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().exists(&args[1..]).into())
+pub async fn exists(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let keys = args.into_iter().collect::<Vec<_>>();
+    Ok(conn.db().exists(&keys).into())
 }
 
-/// Set a timeout on key. After the timeout has expired, the key will automatically be deleted. A
-/// key with an associated timeout is often said to be volatile in Redis terminology.
-///
-/// The timeout will only be cleared by commands that delete or overwrite the contents of the key,
-/// including DEL, SET, GETSET and all the *STORE commands. This means that all the operations that
-/// conceptually alter the value stored at the key without replacing it with a new one will leave
-/// the timeout untouched. For instance, incrementing the value of a key with INCR, pushing a new
-/// value into a list with LPUSH, or altering the field value of a hash with HSET are all
-/// operations that will leave the timeout untouched.
-///
-/// The timeout can also be cleared, turning the key back into a persistent key, using the PERSIST
-/// command.
-pub async fn expire(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let is_milliseconds = check_arg!(args, 0, "PEXPIRE");
+async fn expire_ex(
+    command: &[u8],
+    is_milliseconds: bool,
+    conn: &Connection,
+    mut args: VecDeque<Bytes>,
+) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let expiration = args.pop_front().ok_or(Error::Syntax)?;
 
-    let expires_at = Expiration::new(&args[2], is_milliseconds, false, &args[0])?;
+    let expires_at = Expiration::new(&expiration, is_milliseconds, false, command)?;
 
     if expires_at.is_negative {
         // Delete key right away
-        return Ok(conn.db().del(&args[1..2]));
+        return Ok(conn.db().del(&[key]));
     }
 
+    let opts = args.into_iter().collect::<Vec<_>>();
+
     conn.db()
-        .set_ttl(&args[1], expires_at.try_into()?, (&args[3..]).try_into()?)
+        .set_ttl(&key, expires_at.try_into()?, opts.try_into()?)
+}
+
+/// Set a timeout on key. After the timeout has expired, the key will
+/// automatically be deleted. A key with an associated timeout is often said to
+/// be volatile in Redis terminology.
+///
+/// The timeout will only be cleared by commands that delete or overwrite the
+/// contents of the key, including DEL, SET, GETSET and all the *STORE commands.
+/// This means that all the operations that conceptually alter the value stored
+/// at the key without replacing it with a new one will leave the timeout
+/// untouched. For instance, incrementing the value of a key with INCR, pushing
+/// a new value into a list with LPUSH, or altering the field value of a hash
+/// with HSET are all operations that will leave the timeout untouched.
+///
+/// The timeout can also be cleared, turning the key back into a persistent key,
+/// using the PERSIST command.
+///
+/// If a key is renamed with RENAME, the associated time to live is transferred
+/// to the new key name.
+///
+/// If a key is overwritten by RENAME, like in the case of an existing key Key_A
+/// that is overwritten by a call like RENAME Key_B Key_A, it does not matter if
+/// the original Key_A had a timeout associated or not, the new key Key_A will
+/// inherit all the characteristics of Key_B.
+///
+/// Note that calling EXPIRE/PEXPIRE with a non-positive timeout or
+/// EXPIREAT/PEXPIREAT with a time in the past will result in the key being
+/// deleted rather than expired
+pub async fn expire(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    expire_ex(b"EXPIRE", false, conn, args).await
+}
+
+/// This command works exactly like EXPIRE but the time to live of the key is
+/// specified in milliseconds instead of seconds.
+pub async fn pexpire(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    expire_ex(b"PEXPIRE", true, conn, args).await
 }
 
 /// Returns the string representation of the type of the value stored at key.
 /// The different types that can be returned are: string, list, set, zset, hash
 /// and stream.
-pub async fn data_type(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().get_data_type(&args[1]).into())
+pub async fn data_type(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    Ok(conn.db().get_data_type(&args[0]).into())
 }
 
 /// EXPIREAT has the same effect and semantic as EXPIRE, but instead of specifying the number of
 /// seconds representing the TTL (time to live), it takes an absolute Unix timestamp (seconds since
 /// January 1, 1970). A timestamp in the past will delete the key immediately.
-pub async fn expire_at(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let is_milliseconds = check_arg!(args, 0, "PEXPIREAT");
-    let expires_at = Expiration::new(&args[2], is_milliseconds, true, &args[0])?;
+pub async fn expire_at(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let expiration = args.pop_front().ok_or(Error::Syntax)?;
+    let expires_at = Expiration::new(&expiration, false, true, b"EXPIREAT")?;
 
     if expires_at.is_negative {
         // Delete key right away
-        return Ok(conn.db().del(&args[1..2]));
+        return Ok(conn.db().del(&[key]));
     }
 
-    conn.db()
-        .set_ttl(&args[1], expires_at.try_into()?, (&args[3..]).try_into()?)
+    conn.db().set_ttl(
+        &key,
+        expires_at.try_into()?,
+        args.into_iter().collect::<Vec<_>>().try_into()?,
+    )
+}
+
+/// PEXPIREAT has the same effect and semantic as EXPIREAT, but the Unix time at
+/// which the key will expire is specified in milliseconds instead of seconds.
+pub async fn pexpire_at(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let expiration = args.pop_front().ok_or(Error::Syntax)?;
+    let expires_at = Expiration::new(&expiration, true, true, b"PEXPIREAT")?;
+
+    if expires_at.is_negative {
+        // Delete key right away
+        return Ok(conn.db().del(&[key]));
+    }
+
+    conn.db().set_ttl(
+        &key,
+        expires_at.try_into()?,
+        args.into_iter().collect::<Vec<_>>().try_into()?,
+    )
+}
+
+/// PEXPIRETIME has the same semantic as EXPIRETIME, but returns the absolute
+/// Unix expiration timestamp in milliseconds instead of seconds.
+pub async fn p_expire_time(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let ttl = match conn.db().ttl(&args[0]) {
+        Some(Some(ttl)) => {
+            // Is there a better way? There should be!
+            let secs: i64 = (ttl - Instant::now()).as_millis() as i64;
+            secs + 1 + (now().as_millis() as i64)
+        }
+        Some(None) => -1,
+        None => -2,
+    };
+
+    Ok(ttl.into())
 }
 
 /// Returns the absolute Unix timestamp (since January 1, 1970) in seconds at which the given key
 /// will expire.
-pub async fn expire_time(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let ttl = match conn.db().ttl(&args[1]) {
+pub async fn expire_time(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let ttl = match conn.db().ttl(&args[0]) {
         Some(Some(ttl)) => {
             // Is there a better way? There should be!
-            if check_arg!(args, 0, "EXPIRETIME") {
-                let secs: i64 = (ttl - Instant::now()).as_secs() as i64;
-                secs + 1 + (now().as_secs() as i64)
-            } else {
-                let secs: i64 = (ttl - Instant::now()).as_millis() as i64;
-                secs + 1 + (now().as_millis() as i64)
-            }
+            let secs: i64 = (ttl - Instant::now()).as_secs() as i64;
+            secs + 1 + (now().as_secs() as i64)
         }
         Some(None) => -1,
         None => -2,
@@ -145,21 +216,23 @@ pub async fn expire_time(conn: &Connection, args: &[Bytes]) -> Result<Value, Err
 }
 
 /// Returns all keys that matches a given pattern
-pub async fn keys(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().get_all_keys(&args[1])?.into())
+pub async fn keys(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    Ok(conn.db().get_all_keys(&args[0])?.into())
 }
 
 /// Move key from the currently selected database (see SELECT) to the specified
 /// destination database. When key already exists in the destination database,
 /// or it does not exist in the source database, it does nothing. It is possible
 /// to use MOVE as a locking primitive because of this.
-pub async fn move_key(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn move_key(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let target_db = args.pop_front().ok_or(Error::Syntax)?;
     let target_db = conn
         .all_connections()
         .get_databases()
-        .get(bytes_to_int(&args[2])?)?;
+        .get(bytes_to_int(&target_db)?)?;
 
-    Ok(if conn.db().move_key(&args[1], target_db)? {
+    Ok(if conn.db().move_key(key, target_db)? {
         1.into()
     } else {
         0.into()
@@ -167,10 +240,10 @@ pub async fn move_key(conn: &Connection, args: &[Bytes]) -> Result<Value, Error>
 }
 
 /// Return information about the object/value stored in the database
-pub async fn object(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let subcommand = String::from_utf8_lossy(&args[1]).to_lowercase();
+pub async fn object(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let subcommand = String::from_utf8_lossy(&args[0]).to_lowercase();
 
-    let expected_args = if subcommand == "help" { 2 } else { 3 };
+    let expected_args = if subcommand == "help" { 1 } else { 2 };
 
     if expected_args != args.len() {
         return Err(Error::SubCommandNotFound(
@@ -181,7 +254,7 @@ pub async fn object(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 
     match subcommand.as_str() {
         "help" => super::help::object(),
-        "refcount" => Ok(if conn.db().exists(&[args[2].clone()]) == 1 {
+        "refcount" => Ok(if conn.db().exists(&[args[1].clone()]) == 1 {
             1.into()
         } else {
             Value::Null
@@ -194,7 +267,7 @@ pub async fn object(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 }
 
 /// Return a random key from the currently selected database.
-pub async fn randomkey(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn randomkey(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().randomkey()
 }
 
@@ -203,10 +276,19 @@ pub async fn randomkey(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
 /// an implicit DEL operation, so if the deleted key contains a very big value
 /// it may cause high latency even if RENAME itself is usually a constant-time
 /// operation.
-pub async fn rename(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let is_rename = check_arg!(args, 0, "RENAME");
-    if conn.db().rename(&args[1], &args[2], is_rename.into())? {
-        Ok(if is_rename { Value::Ok } else { 1.into() })
+pub async fn rename(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    if conn.db().rename(&args[0], &args[1], true.into())? {
+        Ok(Value::Ok)
+    } else {
+        Ok(0.into())
+    }
+}
+
+/// Renames key to newkey if newkey does not yet exist. It returns an error when
+/// key does not exist.
+pub async fn renamenx(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    if conn.db().rename(&args[0], &args[1], false.into())? {
+        Ok(1.into())
     } else {
         Ok(0.into())
     }
@@ -215,22 +297,25 @@ pub async fn rename(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// SCAN is a cursor based iterator. This means that at every call of the
 /// command, the server returns an updated cursor that the user needs to use as
 /// the cursor argument in the next call.
-pub async fn scan(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let cursor: Cursor = (&args[1]).try_into()?;
-    let mut current = 2;
+pub async fn scan(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let cursor = args.pop_front().ok_or(Error::Syntax)?;
+    let cursor: Cursor = (&cursor).try_into()?;
     let mut pattern = None;
     let mut count = None;
     let mut typ = None;
 
-    for i in (2..args.len()).step_by(2) {
-        let value = args
-            .get(i + 1)
-            .ok_or(Error::InvalidArgsCount("SCAN".to_owned()))?;
-        match String::from_utf8_lossy(&args[i]).to_uppercase().as_str() {
+    loop {
+        let key = if let Some(key) = args.pop_front() {
+            key
+        } else {
+            break;
+        };
+        let value = args.pop_front().ok_or(Error::Syntax)?;
+        match String::from_utf8_lossy(&key).to_uppercase().as_str() {
             "MATCH" => pattern = Some(value),
             "COUNT" => {
                 count = Some(
-                    bytes_to_number(value)
+                    bytes_to_number(&value)
                         .map_err(|_| Error::InvalidArgsCount("SCAN".to_owned()))?,
                 )
             }
@@ -249,15 +334,27 @@ pub async fn scan(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// Returns the remaining time to live of a key that has a timeout. This introspection capability
 /// allows a Redis client to check how many seconds a given key will continue to be part of the
 /// dataset.
-pub async fn ttl(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let ttl = match conn.db().ttl(&args[1]) {
+pub async fn ttl(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let ttl = match conn.db().ttl(&args[0]) {
         Some(Some(ttl)) => {
             let ttl = ttl - Instant::now();
-            if check_arg!(args, 0, "TTL") {
-                ttl.as_secs() as i64 + 1
-            } else {
-                ttl.as_millis() as i64
-            }
+            ttl.as_secs() as i64 + 1
+        }
+        Some(None) => -1,
+        None => -2,
+    };
+
+    Ok(ttl.into())
+}
+
+/// Like TTL this command returns the remaining time to live of a key that has
+/// an expire set, with the sole difference that TTL returns the amount of
+/// remaining time in seconds while PTTL returns it in milliseconds.
+pub async fn pttl(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let ttl = match conn.db().ttl(&args[0]) {
+        Some(Some(ttl)) => {
+            let ttl = ttl - Instant::now();
+            ttl.as_millis() as i64
         }
         Some(None) => -1,
         None => -2,
@@ -268,8 +365,8 @@ pub async fn ttl(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 
 /// Remove the existing timeout on key, turning the key from volatile (a key with an expire set) to
 /// persistent (a key that will never expire as no timeout is associated).
-pub async fn persist(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().persist(&args[1]))
+pub async fn persist(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    Ok(conn.db().persist(&args[0]))
 }
 
 #[cfg(test)]
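
The new `expire_time`/`p_expire_time` handlers reconstruct an absolute Unix timestamp from the `Instant`-based deadline stored in the database: remaining time plus current wall-clock time, in seconds or milliseconds. A rough standalone sketch of that arithmetic using only `std` types:

```rust
// Back-of-the-envelope sketch of the EXPIRETIME/PEXPIRETIME arithmetic:
// absolute expiration = remaining time until the stored deadline + current
// Unix time. Plain std types are used here instead of the crate's helpers.
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};

fn main() {
    // Pretend the key expires 90 seconds from now.
    let deadline = Instant::now() + Duration::from_secs(90);

    let now_unix = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("clock before UNIX_EPOCH");
    let remaining = deadline - Instant::now();

    // EXPIRETIME-style result in seconds (the +1 rounds the truncated value
    // up, mirroring the handler above); PEXPIRETIME-style in milliseconds.
    let expire_time_secs = remaining.as_secs() as i64 + 1 + now_unix.as_secs() as i64;
    let p_expire_time_ms = remaining.as_millis() as i64 + 1 + now_unix.as_millis() as i64;

    println!("EXPIRETIME  -> {expire_time_secs}");
    println!("PEXPIRETIME -> {p_expire_time_ms}");
}
```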

+ 168 - 126
src/cmd/list.rs

@@ -87,10 +87,10 @@ async fn schedule_blocking_task<F, T>(
     conn: Arc<Connection>,
     keys_to_watch: Vec<Bytes>,
     worker: F,
-    args: Vec<Bytes>,
+    args: VecDeque<Bytes>,
     timeout: Option<Instant>,
 ) where
-    F: Fn(Arc<Connection>, Vec<Bytes>, usize) -> T + Send + Sync + 'static,
+    F: Fn(Arc<Connection>, VecDeque<Bytes>, usize) -> T + Send + Sync + 'static,
     T: Future<Output = Result<Value, Error>> + Send + Sync + 'static,
 {
     let (mut timeout_sx, mut timeout_rx) = broadcast::channel::<()>(1);
@@ -124,7 +124,7 @@ async fn schedule_blocking_task<F, T>(
 
         loop {
             // Run task
-            match worker(conn.clone(), args.to_vec(), attempt).await {
+            match worker(conn.clone(), args.clone(), attempt).await {
                 Ok(Value::Ignore | Value::Null) => {}
                 Ok(result) => {
                     conn.append_response(result);
@@ -183,13 +183,12 @@ fn parse_timeout(arg: &Bytes) -> Result<Option<Instant>, Error> {
 /// the connection when there are no elements to pop from any of the given lists. An element is
 /// popped from the head of the first list that is non-empty, with the given keys being checked in
 /// the order that they are given.
-pub async fn blpop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let blpop_task = |conn: Arc<Connection>, args: Vec<Bytes>, attempt| async move {
-        for key in (1..args.len() - 1) {
-            let key = &args[key];
+pub async fn blpop(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let blpop_task = |conn: Arc<Connection>, args: VecDeque<Bytes>, attempt| async move {
+        for key in args.iter() {
             match remove_element(&conn, key, None, true) {
                 Ok(Value::Null) => (),
-                Ok(n) => return Ok(vec![Value::new(&key), n].into()),
+                Ok(n) => return Ok(vec![Value::Blob(key.clone()), n].into()),
                 Err(x) => {
                     if attempt == 1 {
                         return Err(x);
@@ -201,13 +200,12 @@ pub async fn blpop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
     };
 
     if conn.is_executing_tx() {
-        return blpop_task(conn.clone(), args.to_vec(), 1).await;
+        return blpop_task(conn.clone(), args, 1).await;
     }
 
     let timeout = parse_timeout(&args[args.len() - 1])?;
     let conn = conn.clone();
-    let args = args.to_vec();
-    let keys_to_watch = (&args[1..args.len() - 1]).to_vec();
+    let keys_to_watch = args.iter().map(|c| c.clone()).collect::<Vec<_>>();
 
     conn.block();
 
@@ -227,19 +225,19 @@ pub async fn blpop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// RIGHT LEFT is equivalent.
 ///
 /// See LMOVE for more information.
-pub async fn blmove(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn blmove(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
     if conn.is_executing_tx() {
-        return lmove(&conn, &args).await;
+        return lmove(&conn, args).await;
     }
 
-    let timeout = parse_timeout(&args[5])?;
-    let keys_to_watch = (&args[1..=2]).to_vec();
+    let timeout = parse_timeout(&args[4])?;
+    let keys_to_watch = vec![args[0].clone(), args[1].clone()];
 
     schedule_blocking_task(
         conn.clone(),
         keys_to_watch,
-        |conn, args, _| async move { lmove(&conn, &args).await },
-        args.to_vec(),
+        |conn, args, _| async move { lmove(&conn, args).await },
+        args,
         timeout,
     )
     .await;
@@ -253,17 +251,16 @@ pub async fn blmove(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// is empty, Redis will block the connection until another client pushes to it
 /// or until timeout is reached. A timeout of zero can be used to block
 /// indefinitely.
-pub async fn brpoplpush(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn brpoplpush(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
     blmove(
         conn,
-        &[
-            "blmove".into(),
-            args[1].clone(),
-            args[2].clone(),
+        VecDeque::from([
+            args.pop_front().ok_or(Error::Syntax)?,
+            args.pop_front().ok_or(Error::Syntax)?,
             "RIGHT".into(),
             "LEFT".into(),
-            args[3].clone(),
-        ],
+            args.pop_front().ok_or(Error::Syntax)?,
+        ]),
     )
     .await
 }
@@ -272,13 +269,12 @@ pub async fn brpoplpush(conn: &Connection, args: &[Bytes]) -> Result<Value, Erro
 /// the connection when there are no elements to pop from any of the given lists. An element is
 /// popped from the tail of the first list that is non-empty, with the given keys being checked in
 /// the order that they are given.
-pub async fn brpop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let brpop_task = |conn: Arc<Connection>, args: Vec<Bytes>, attempt| async move {
-        for key in (1..args.len() - 1) {
-            let key = &args[key];
+pub async fn brpop(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let brpop_task = |conn: Arc<Connection>, args: VecDeque<Bytes>, attempt| async move {
+        for key in args.iter() {
             match remove_element(&conn, key, None, false) {
                 Ok(Value::Null) => (),
-                Ok(n) => return Ok(vec![Value::new(&key), n].into()),
+                Ok(n) => return Ok(vec![Value::Blob(key.clone()), n].into()),
                 Err(x) => {
                     if attempt == 1 {
                         return Err(x);
@@ -290,20 +286,13 @@ pub async fn brpop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
     };
 
     if conn.is_executing_tx() {
-        return brpop_task(conn.clone(), args.to_vec(), 1).await;
+        return brpop_task(conn.clone(), args, 1).await;
     }
 
     let timeout = parse_timeout(&args[args.len() - 1])?;
-    let keys_to_watch = (&args[1..args.len() - 1]).to_vec();
+    let keys_to_watch = args.iter().cloned().collect();
 
-    schedule_blocking_task(
-        conn.clone(),
-        keys_to_watch,
-        brpop_task,
-        args.to_vec(),
-        timeout,
-    )
-    .await;
+    schedule_blocking_task(conn.clone(), keys_to_watch, brpop_task, args, timeout).await;
 
     Ok(Value::Ignore)
 }
@@ -312,12 +301,12 @@ pub async fn brpop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// means the first element, 1 the second element and so on. Negative indices can be used to
 /// designate elements starting at the tail of the list. Here, -1 means the last element, -2 means
 /// the penultimate and so forth.
-pub async fn lindex(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn lindex(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::List(x) => {
-                let mut index: i64 = bytes_to_number(&args[2])?;
+                let mut index: i64 = bytes_to_number(&args[1])?;
                 let x = x.read();
 
                 let index = if index < 0 {
@@ -339,20 +328,26 @@ pub async fn lindex(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// Inserts element in the list stored at key either before or after the reference value pivot.
 ///
 /// When key does not exist, it is considered an empty list and no operation is performed.
-pub async fn linsert(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let is_before = if check_arg!(args, 2, "BEFORE") {
-        true
-    } else if check_arg!(args, 2, "AFTER") {
-        false
-    } else {
-        return Err(Error::Syntax);
+pub async fn linsert(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let direction = args.pop_front().ok_or(Error::Syntax)?;
+    let pivot = args.pop_front().ok_or(Error::Syntax)?;
+    let value = args.pop_front().ok_or(Error::Syntax)?;
+
+    let is_before = match String::from_utf8_lossy(&direction)
+        .to_ascii_uppercase()
+        .as_str()
+    {
+        "BEFORE" => true,
+        "AFTER" => false,
+        _ => return Err(Error::Syntax),
     };
 
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::List(x) => {
-                let pivot = checksum::Ref::new(&args[3]);
+                let pivot = checksum::Ref::new(&pivot);
                 let mut x = x.write();
                 let mut found = false;
 
@@ -360,7 +355,7 @@ pub async fn linsert(conn: &Connection, args: &[Bytes]) -> Result<Value, Error>
                     if *val == pivot {
                         let id = if is_before { key } else { key + 1 };
 
-                        let value = checksum::Value::new(args[4].clone());
+                        let value = checksum::Value::new(value);
 
                         if id > x.len() {
                             x.push_back(value);
@@ -384,16 +379,16 @@ pub async fn linsert(conn: &Connection, args: &[Bytes]) -> Result<Value, Error>
         || Ok(0.into()),
     )?;
 
-    conn.db().bump_version(&args[1]);
+    conn.db().bump_version(&key);
 
     Ok(result)
 }
 
 /// Returns the length of the list stored at key. If key does not exist, it is interpreted as an
 /// empty list and 0 is returned. An error is returned when the value stored at key is not a list.
-pub async fn llen(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn llen(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::List(x) => Ok(x.read().len().into()),
             _ => Err(Error::WrongType),
@@ -405,35 +400,39 @@ pub async fn llen(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// Atomically returns and removes the first/last element (head/tail depending on the wherefrom
 /// argument) of the list stored at source, and pushes the element at the first/last element
 /// (head/tail depending on the whereto argument) of the list stored at destination.
-pub async fn lmove(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let source_is_left = if check_arg!(args, 3, "LEFT") {
+pub async fn lmove(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let source_is_left = if check_arg!(args, 2, "LEFT") {
         true
-    } else if check_arg!(args, 3, "RIGHT") {
+    } else if check_arg!(args, 2, "RIGHT") {
         false
     } else {
         return Err(Error::Syntax);
     };
 
-    let target_is_left = if check_arg!(args, 4, "LEFT") {
+    let target_is_left = if check_arg!(args, 3, "LEFT") {
         true
-    } else if check_arg!(args, 4, "RIGHT") {
+    } else if check_arg!(args, 3, "RIGHT") {
         false
     } else {
         return Err(Error::Syntax);
     };
 
+    let source = args.pop_front().ok_or(Error::Syntax)?;
+    let destination = args.pop_front().ok_or(Error::Syntax)?;
+    let to_lock = vec![source.clone(), destination.clone()];
+
     let db = conn.db();
 
     /// Lock keys to alter exclusively
-    db.lock_keys(&args[1..=2]);
+    db.lock_keys(&to_lock);
 
     let mut to_create = None;
 
     let result = db.get_map_or(
-        &args[1],
+        &source,
         |v| match v {
             Value::List(source) => conn.db().get_map_or(
-                &args[2],
+                &destination,
                 |v| match v {
                     Value::List(target) => {
                         let element = if source_is_left {
@@ -481,15 +480,15 @@ pub async fn lmove(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
     );
 
     if let Some(to_create) = to_create {
-        conn.db().set(&args[2], to_create.into(), None);
+        conn.db().set(destination.clone(), to_create.into(), None);
     }
 
     /// release the lock on keys
-    db.unlock_keys(&args[1..=2]);
+    db.unlock_keys(&to_lock);
 
     if result != Ok(Value::Null) {
-        conn.db().bump_version(&args[1]);
-        conn.db().bump_version(&args[2]);
+        conn.db().bump_version(&source);
+        conn.db().bump_version(&destination);
     }
 
     result
@@ -500,26 +499,26 @@ pub async fn lmove(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// By default, the command pops a single element from the beginning of the list. When provided
 /// with the optional count argument, the reply will consist of up to count elements, depending on
 /// the list's length.
-pub async fn lpop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let count = match args.get(2) {
+pub async fn lpop(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let count = match args.get(1) {
         Some(v) => Some(bytes_to_number(&v)?),
         None => None,
     };
 
-    remove_element(conn, &args[1], count, true)
+    remove_element(conn, &args[0], count, true)
 }
 
 /// The command returns the index of matching elements inside a Redis list. By default, when no
 /// options are given, it will scan the list from head to tail, looking for the first match of
 /// "element". If the element is found, its index (the zero-based position in the list) is
 /// returned. Otherwise, if no match is found, nil is returned.
-pub async fn lpos(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let element = checksum::Ref::new(&args[2]);
+pub async fn lpos(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let element = checksum::Ref::new(&args[1]);
     let mut rank = None;
     let mut count = None;
     let mut max_len = None;
 
-    let mut index = 3;
+    let mut index = 2;
     loop {
         if args.len() <= index {
             break;
@@ -552,7 +551,7 @@ pub async fn lpos(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
     let max_len = max_len.unwrap_or_default();
 
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::List(x) => {
                 let x = x.read();
@@ -646,15 +645,14 @@ pub async fn lpos(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// the list, from the leftmost element to the rightmost element. So for instance the command LPUSH
 /// mylist a b c will result into a list containing c as first element, b as second element and a
 /// as third element.
-pub async fn lpush(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let is_push_x = check_arg!(args, 0, "LPUSHX");
-
+pub async fn lpush(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::List(x) => {
                 let mut x = x.write();
-                for val in args.iter().skip(2) {
+                for val in args.clone().into_iter() {
                     x.push_front(checksum::Value::new(val.clone()));
                 }
                 Ok(x.len().into())
@@ -662,23 +660,43 @@ pub async fn lpush(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
             _ => Err(Error::WrongType),
         },
         || {
-            if is_push_x {
-                return Ok(0.into());
-            }
             let mut h = VecDeque::new();
 
-            for val in args.iter().skip(2) {
-                h.push_front(checksum::Value::new(val.clone()));
+            for val in args.clone().into_iter() {
+                h.push_front(checksum::Value::new(val));
             }
 
             let len = h.len();
-            conn.db().set(&args[1], h.into(), None);
+            conn.db().set(key.clone(), h.into(), None);
             Ok(len.into())
         },
     )?;
 
-    conn.db().bump_version(&args[1]);
+    conn.db().bump_version(&key);
+    Ok(result)
+}
+
+/// Inserts the specified values at the head of the list stored at key, only if key already
+/// exists and holds a list. In contrast to LPUSH, no operation will be performed when key
+/// does not yet exist.
+pub async fn lpushx(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let result = conn.db().get_map_or(
+        &key,
+        |v| match v {
+            Value::List(x) => {
+                let mut x = x.write();
+                for val in args.into_iter() {
+                    x.push_front(checksum::Value::new(val));
+                }
+                Ok(x.len().into())
+            }
+            _ => Err(Error::WrongType),
+        },
+        || {
+            return Ok(0.into());
+        },
+    )?;
 
+    conn.db().bump_version(&key);
     Ok(result)
 }
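
With the command name no longer part of the argument list, LPUSH and LPUSHX get separate handlers instead of branching on `check_arg!(args, 0, "LPUSHX")`. The ordering both rely on is that pushing each value to the front leaves the last supplied value at the head; a small sketch with plain strings standing in for the checksum-wrapped values:

```rust
use std::collections::VecDeque;

// Each value is pushed to the front, so the last value supplied ends up at
// the head of the list.
fn lpush_order(values: Vec<&str>) -> VecDeque<String> {
    let mut list = VecDeque::new();
    for v in values {
        list.push_front(v.to_string());
    }
    list
}

fn main() {
    assert_eq!(
        lpush_order(vec!["a", "b", "c"]),
        VecDeque::from(["c".to_string(), "b".to_string(), "a".to_string()])
    );
}
```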
 
@@ -688,13 +706,13 @@ pub async fn lpush(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 ///
 /// These offsets can also be negative numbers indicating offsets starting at the end of the list.
 /// For example, -1 is the last element of the list, -2 the penultimate, and so on.
-pub async fn lrange(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn lrange(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::List(x) => {
-                let start: i64 = bytes_to_number(&args[2])?;
-                let end: i64 = bytes_to_number(&args[3])?;
+                let start: i64 = bytes_to_number(&args[1])?;
+                let end: i64 = bytes_to_number(&args[2])?;
                 let mut ret = vec![];
                 let x = x.read();
 
@@ -731,13 +749,13 @@ pub async fn lrange(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 }
 
 /// Removes the first count occurrences of elements equal to element from the list stored at key
-pub async fn lrem(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn lrem(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     let result = conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::List(x) => {
-                let element = checksum::Ref::new(&args[3]);
-                let limit: i64 = bytes_to_number(&args[2])?;
+                let element = checksum::Ref::new(&args[2]);
+                let limit: i64 = bytes_to_number(&args[1])?;
                 let mut x = x.write();
 
                 let (is_reverse, limit) = if limit < 0 {
@@ -777,7 +795,7 @@ pub async fn lrem(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
         || Ok(0.into()),
     )?;
 
-    conn.db().bump_version(&args[1]);
+    conn.db().bump_version(&args[0]);
 
     Ok(result)
 }
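
The negative-count rule in LREM (scan from the tail, zero means remove every match) can be shown in isolation; the real handler operates on checksum-wrapped values inside the db, plain strings stand in here:

```rust
use std::collections::VecDeque;

// A negative count means "scan from the tail"; a count of zero removes
// every occurrence.
fn lrem(list: &mut VecDeque<String>, count: i64, element: &str) -> usize {
    let (is_reverse, limit) = if count < 0 {
        (true, count.unsigned_abs() as usize)
    } else {
        (false, count as usize)
    };
    let limit = if limit == 0 { usize::MAX } else { limit };

    let mut removed = 0;
    let mut i = 0;
    while i < list.len() && removed < limit {
        // Walk from the head or from the tail depending on the sign of count.
        let pos = if is_reverse { list.len() - 1 - i } else { i };
        if list[pos] == element {
            let _ = list.remove(pos);
            removed += 1;
        } else {
            i += 1;
        }
    }
    removed
}

fn main() {
    let mut list: VecDeque<String> = vec!["a", "b", "a", "c", "a"]
        .into_iter()
        .map(String::from)
        .collect();
    assert_eq!(lrem(&mut list, -2, "a"), 2);
    assert_eq!(
        list,
        VecDeque::from(["a".to_string(), "b".to_string(), "c".to_string()])
    );
}
```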
@@ -786,12 +804,16 @@ pub async fn lrem(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// LINDEX.
 ///
 /// An error is returned for out of range indexes.
-pub async fn lset(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn lset(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let index = args.pop_front().ok_or(Error::Syntax)?;
+    let value = args.pop_front().ok_or(Error::Syntax)?;
+
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::List(x) => {
-                let mut index: i64 = bytes_to_number(&args[2])?;
+                let mut index: i64 = bytes_to_number(&index)?;
                 let mut x = x.write();
 
                 if index < 0 {
@@ -799,7 +821,7 @@ pub async fn lset(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
                 }
 
                 if let Some(x) = x.get_mut(index as usize) {
-                    *x = checksum::Value::new(args[3].clone());
+                    *x = checksum::Value::new(value);
                     Ok(Value::Ok)
                 } else {
                     Err(Error::OutOfRange)
@@ -810,7 +832,7 @@ pub async fn lset(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
         || Err(Error::NotFound),
     )?;
 
-    conn.db().bump_version(&args[1]);
+    conn.db().bump_version(&key);
 
     Ok(result)
 }
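
The negative-index translation used by LSET is small enough to show on its own; plain strings stand in for the stored values and a string error for `Error::OutOfRange`:

```rust
use std::collections::VecDeque;

// Negative indexes are translated by adding the list length, so -1 addresses
// the last element; anything still out of range is an error.
fn lset(list: &mut VecDeque<String>, mut index: i64, value: &str) -> Result<(), &'static str> {
    if index < 0 {
        index += list.len() as i64;
    }
    if index < 0 {
        return Err("index out of range");
    }
    match list.get_mut(index as usize) {
        Some(slot) => {
            *slot = value.to_string();
            Ok(())
        }
        None => Err("index out of range"),
    }
}

fn main() {
    let mut list: VecDeque<String> = vec!["a", "b", "c"].into_iter().map(String::from).collect();
    lset(&mut list, -1, "z").unwrap();
    assert_eq!(list, VecDeque::from(["a".to_string(), "b".to_string(), "z".to_string()]));
    assert!(lset(&mut list, 5, "x").is_err());
}
```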
@@ -818,13 +840,13 @@ pub async fn lset(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// Trim an existing list so that it will contain only the specified range of elements.
 /// Both start and stop are zero-based indexes, where 0 is the first element of the list (the
 /// head), 1 the next element and so on.
-pub async fn ltrim(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn ltrim(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     let result = conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::List(x) => {
-                let mut start: i64 = bytes_to_number(&args[2])?;
-                let mut end: i64 = bytes_to_number(&args[3])?;
+                let mut start: i64 = bytes_to_number(&args[1])?;
+                let mut end: i64 = bytes_to_number(&args[2])?;
                 let mut x = x.write();
 
                 if start < 0 {
@@ -859,27 +881,26 @@ pub async fn ltrim(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// By default, the command pops a single element from the end of the list. When provided with the
 /// optional count argument, the reply will consist of up to count elements, depending on the
 /// list's length.
-pub async fn rpop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let count = match args.get(2) {
+pub async fn rpop(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let count = match args.get(1) {
         Some(v) => Some(bytes_to_number(&v)?),
         None => None,
     };
 
-    remove_element(conn, &args[1], count, false)
+    remove_element(conn, &args[0], count, false)
 }
 
 /// Atomically returns and removes the last element (tail) of the list stored at source, and pushes
 /// the element at the first element (head) of the list stored at destination.
-pub async fn rpoplpush(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn rpoplpush(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
     lmove(
         conn,
-        &[
-            "lmove".into(),
-            args[1].clone(),
-            args[2].clone(),
+        VecDeque::from([
+            args.pop_front().ok_or(Error::Syntax)?,
+            args.pop_front().ok_or(Error::Syntax)?,
             "RIGHT".into(),
             "LEFT".into(),
-        ],
+        ]),
     )
     .await
 }
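
RPOPLPUSH is now a thin wrapper that rebuilds the argument deque LMOVE expects; the construction on its own (key names are placeholders):

```rust
use bytes::Bytes;
use std::collections::VecDeque;

// Rebuild the argument list LMOVE expects: source, destination, and the
// fixed RIGHT/LEFT direction pair.
fn rpoplpush_args(source: Bytes, destination: Bytes) -> VecDeque<Bytes> {
    VecDeque::from([source, destination, "RIGHT".into(), "LEFT".into()])
}

fn main() {
    let args = rpoplpush_args(Bytes::from("src"), Bytes::from("dst"));
    assert_eq!(
        args,
        VecDeque::from([
            Bytes::from("src"),
            Bytes::from("dst"),
            Bytes::from("RIGHT"),
            Bytes::from("LEFT"),
        ])
    );
}
```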
@@ -887,39 +908,60 @@ pub async fn rpoplpush(conn: &Connection, args: &[Bytes]) -> Result<Value, Error
 /// Inserts the specified values at the tail of the list stored at key, only if key already
 /// exists and holds a list. In contrast to RPUSH, no operation will be performed when key
 /// does not yet exist.
-pub async fn rpush(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let is_push_x = check_arg!(args, 0, "RPUSHX");
-
+pub async fn rpushx(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::List(x) => {
                 let mut x = x.write();
-                for val in args.iter().skip(2) {
-                    x.push_back(checksum::Value::new(val.clone()));
+                for val in args.into_iter() {
+                    x.push_back(checksum::Value::new(val));
                 }
                 Ok(x.len().into())
             }
             _ => Err(Error::WrongType),
         },
         || {
-            if is_push_x {
-                return Ok(0.into());
+            return Ok(0.into());
+        },
+    )?;
+
+    conn.db().bump_version(&key);
+    Ok(result)
+}
+
+/// Insert all the specified values at the tail of the list stored at key. If key does not exist,
+/// it is created as empty list before performing the push operation. When key holds a value that
+/// is not a list, an error is returned.
+pub async fn rpush(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let result = conn.db().get_map_or(
+        &key,
+        |v| match v {
+            Value::List(x) => {
+                let mut x = x.write();
+                for val in args.clone().into_iter() {
+                    x.push_back(checksum::Value::new(val));
+                }
+                Ok(x.len().into())
             }
+            _ => Err(Error::WrongType),
+        },
+        || {
             let mut h = VecDeque::new();
 
-            for val in args.iter().skip(2) {
-                h.push_back(checksum::Value::new(val.clone()));
+            for val in args.clone().into_iter() {
+                h.push_back(checksum::Value::new(val));
             }
 
             let len = h.len();
-            conn.db().set(&args[1], h.into(), None);
+            conn.db().set(key.clone(), h.into(), None);
             Ok(len.into())
         },
     )?;
 
-    conn.db().bump_version(&args[1]);
-
+    conn.db().bump_version(&key);
     Ok(result)
 }
 

+ 5 - 3
src/cmd/metrics.rs

@@ -1,19 +1,21 @@
 //! # Metrics command handlers
+use std::collections::VecDeque;
+
 use crate::{connection::Connection, error::Error, value::Value};
 use bytes::Bytes;
 
 /// Dumps metrics from commands. If no argument is passed, all commands' metrics are dumped.
 ///
 /// The metrics are serialized as JSON.
-pub async fn metrics(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn metrics(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     let dispatcher = conn.all_connections().get_dispatcher();
     let mut result: Vec<Value> = vec![];
     let commands = if args.len() == 1 {
         dispatcher.get_all_commands()
     } else {
         let mut commands = vec![];
-        for command in &(args[1..]) {
-            let command = String::from_utf8_lossy(command);
+        for command in args.into_iter() {
+            let command = String::from_utf8_lossy(&command);
             commands.push(dispatcher.get_handler_for_command(&command)?);
         }
         commands

+ 3 - 2
src/cmd/mod.rs

@@ -33,6 +33,7 @@ mod test {
     };
     use bytes::Bytes;
     use std::{
+        collections::VecDeque,
         net::{IpAddr, Ipv4Addr, SocketAddr},
         sync::Arc,
     };
@@ -76,10 +77,10 @@ mod test {
     }
 
     pub async fn run_command(conn: &Connection, cmd: &[&str]) -> Result<Value, Error> {
-        let args: Vec<Bytes> = cmd.iter().map(|s| Bytes::from(s.to_string())).collect();
+        let args: VecDeque<Bytes> = cmd.iter().map(|s| Bytes::from(s.to_string())).collect();
 
         let dispatcher = Dispatcher::new();
-        dispatcher.execute(conn, &args).await
+        dispatcher.execute(conn, args).await
     }
 
     #[tokio::test]
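
The `s.to_string()` in the test helper is needed because the `From<&str>` impl on `Bytes` only accepts `&'static str`; borrowed strings have to go through an owned `String` (or `Bytes::copy_from_slice`). A standalone illustration of the same conversion:

```rust
use bytes::Bytes;
use std::collections::VecDeque;

// Borrowed &str arguments are copied into owned Bytes via String.
fn to_args(cmd: &[&str]) -> VecDeque<Bytes> {
    cmd.iter().map(|s| Bytes::from(s.to_string())).collect()
}

fn main() {
    let args = to_args(&["lpush", "mylist", "a", "b"]);
    assert_eq!(args.len(), 4);
    assert_eq!(args[0], Bytes::from("lpush"));
}
```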

+ 27 - 27
src/cmd/pubsub.rs

@@ -1,16 +1,21 @@
 //! # Pubsub command handlers
+use std::collections::VecDeque;
+
 use crate::{check_arg, connection::Connection, error::Error, value::Value};
 use bytes::Bytes;
 use glob::Pattern;
 
 /// Posts a message to the given channel.
-pub async fn publish(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.pubsub().publish(&args[1], &args[2]).await.into())
+pub async fn publish(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    Ok(conn.pubsub().publish(&args[0], &args[1]).await.into())
 }
 
 /// All pubsub commands
-pub async fn pubsub(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    match String::from_utf8_lossy(&args[1]).to_lowercase().as_str() {
+pub async fn pubsub(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    match String::from_utf8_lossy(&(args.pop_front().ok_or(Error::Syntax)?))
+        .to_lowercase()
+        .as_str()
+    {
         "channels" => Ok(Value::Array(
             conn.pubsub()
                 .channels()
@@ -22,43 +27,38 @@ pub async fn pubsub(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
         "numpat" => Ok(conn.pubsub().get_number_of_psubscribers().into()),
         "numsub" => Ok(conn
             .pubsub()
-            .get_number_of_subscribers(&args[2..])
+            .get_number_of_subscribers(&args)
             .iter()
-            .map(|(channel, subs)| vec![Value::new(&channel), (*subs).into()])
+            .map(|(channel, subs)| vec![Value::new(channel), (*subs).into()])
             .flatten()
             .collect::<Vec<Value>>()
             .into()),
-        cmd => Err(Error::SubCommandNotFound(
-            cmd.into(),
-            String::from_utf8_lossy(&args[0]).into(),
-        )),
+        cmd => Err(Error::SubCommandNotFound(cmd.into(), "pubsub".into())),
     }
 }
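
The sub-command dispatch above pops the first remaining argument and matches it case-insensitively. A standalone sketch of the same shape, with string literals standing in for the real handlers and a `String` error for `Error::SubCommandNotFound`:

```rust
use bytes::Bytes;
use std::collections::VecDeque;

// Pop the sub-command, normalize its case, then dispatch.
fn dispatch(mut args: VecDeque<Bytes>) -> Result<&'static str, String> {
    let sub = args.pop_front().ok_or_else(|| "missing sub-command".to_string())?;
    match String::from_utf8_lossy(&sub).to_lowercase().as_str() {
        "channels" => Ok("list channels"),
        "numpat" => Ok("count pattern subscribers"),
        "numsub" => Ok("count channel subscribers"),
        cmd => Err(format!("unknown PUBSUB sub-command '{}'", cmd)),
    }
}

fn main() {
    let args = VecDeque::from([Bytes::from("NUMPAT")]);
    assert_eq!(dispatch(args), Ok("count pattern subscribers"));
}
```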
 
 /// Subscribes the client to the specified channels.
-pub async fn subscribe(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn subscribe(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     let pubsub = conn.pubsub();
+    pubsub.subscribe(args, conn);
+    conn.start_pubsub()
+}
 
-    let channels = &args[1..];
-
-    if check_arg!(args, 0, "PSUBSCRIBE") {
-        pubsub.psubscribe(channels, conn)?;
-    } else {
-        pubsub.subscribe(channels, conn);
-    }
-
+/// Subscribes the client to the channels matching the given patterns.
+pub async fn psubscribe(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let pubsub = conn.pubsub();
+    pubsub.psubscribe(args, conn)?;
     conn.start_pubsub()
 }
 
 /// Unsubscribes the client from the given patterns, or from all of them if none is given.
-pub async fn punsubscribe(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let channels = if args.len() == 1 {
+pub async fn punsubscribe(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let channels = if args.is_empty() {
         conn.pubsub_client().psubscriptions()
     } else {
-        (&args[1..])
-            .iter()
+        args.into_iter()
             .map(|channel| {
-                let channel = String::from_utf8_lossy(channel);
+                let channel = String::from_utf8_lossy(&channel);
                 Pattern::new(&channel).map_err(|_| Error::InvalidPattern(channel.to_string()))
             })
             .collect::<Result<Vec<Pattern>, Error>>()?
@@ -70,11 +70,11 @@ pub async fn punsubscribe(conn: &Connection, args: &[Bytes]) -> Result<Value, Er
 }
 
 /// Unsubscribes the client from the given channels, or from all of them if none is given.
-pub async fn unsubscribe(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let channels = if args.len() == 1 {
+pub async fn unsubscribe(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let channels = if args.is_empty() {
         conn.pubsub_client().subscriptions()
     } else {
-        (&args[1..]).to_vec()
+        args.into_iter().collect()
     };
 
     let _ = conn.pubsub_client().unsubscribe(&channels, conn);
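
Collecting the owned channel arguments into `glob::Pattern`s short-circuits on the first invalid pattern via `collect::<Result<_, _>>()`. A standalone sketch, with a `String` error standing in for `Error::InvalidPattern`:

```rust
use bytes::Bytes;
use glob::Pattern;
use std::collections::VecDeque;

// Convert each owned channel argument into a glob Pattern, failing on the
// first invalid pattern.
fn parse_patterns(args: VecDeque<Bytes>) -> Result<Vec<Pattern>, String> {
    args.into_iter()
        .map(|channel| {
            let channel = String::from_utf8_lossy(&channel);
            Pattern::new(&channel).map_err(|_| format!("invalid pattern: {}", channel))
        })
        .collect()
}

fn main() {
    let args = VecDeque::from([Bytes::from("news.*"), Bytes::from("user.[ab]")]);
    assert_eq!(parse_patterns(args).unwrap().len(), 2);
    // An unclosed character class is rejected and aborts the whole collect.
    assert!(parse_patterns(VecDeque::from([Bytes::from("bad[")])).is_err());
}
```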

+ 38 - 28
src/cmd/server.rs

@@ -6,6 +6,7 @@ use crate::{
 use bytes::Bytes;
 use git_version::git_version;
 use std::{
+    collections::VecDeque,
     convert::TryInto,
     ops::Neg,
     time::{SystemTime, UNIX_EPOCH},
@@ -13,9 +14,9 @@ use std::{
 use tokio::time::Duration;
 
 /// Returns Array reply of details about all Redis commands.
-pub async fn command(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn command(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
     let dispatcher = conn.all_connections().get_dispatcher();
-    if args.len() == 1 {
+    if args.is_empty() {
         return Ok(Value::Array(
             dispatcher
                 .get_all_commands()
@@ -25,15 +26,19 @@ pub async fn command(conn: &Connection, args: &[Bytes]) -> Result<Value, Error>
         ));
     }
 
-    match String::from_utf8_lossy(&args[1]).to_lowercase().as_str() {
+    let sub_command = args.pop_front().ok_or(Error::Syntax)?;
+    match String::from_utf8_lossy(&sub_command)
+        .to_lowercase()
+        .as_str()
+    {
         "count" => Ok(dispatcher.get_all_commands().len().into()),
         "info" => {
             let mut result = vec![];
-            for command in &args[2..] {
+            for command in args.into_iter() {
                 result.push(
                     dispatcher
                         .get_handler_for_command(
-                            String::from_utf8_lossy(command).to_string().as_str(),
+                            String::from_utf8_lossy(&command).to_string().as_str(),
                         )
                         .map(|command| command.get_command_info())
                         .unwrap_or_else(|_| Value::Null),
@@ -42,44 +47,50 @@ pub async fn command(conn: &Connection, args: &[Bytes]) -> Result<Value, Error>
             Ok(Value::Array(result))
         }
         "getkeys" => {
-            if args.len() == 2 {
+            if args.is_empty() {
                 return Err(Error::SubCommandNotFound(
-                    String::from_utf8_lossy(&args[1]).into(),
-                    String::from_utf8_lossy(&args[0]).into(),
+                    String::from_utf8_lossy(&sub_command).into(),
+                    "command".into(),
                 ));
             }
-            let args = &args[2..];
-            let command = dispatcher.get_handler(args)?;
+            let command = dispatcher.get_handler(&args)?;
+            let _ = args.pop_front(); // drop the function name from the list of arguments.
             Ok(Value::Array(
                 command
-                    .get_keys(args)
-                    .iter()
-                    .map(|key| Value::new(*key))
+                    .get_keys(&args, false)
+                    .into_iter()
+                    .map(|p| Value::Blob(p))
                     .collect(),
             ))
         }
         "help" => super::help::command(),
-        cmd => Err(Error::SubCommandNotFound(
-            cmd.into(),
-            String::from_utf8_lossy(&args[0]).into(),
-        )),
+        cmd => Err(Error::SubCommandNotFound(cmd.into(), "command".into())),
     }
 }
 
 /// The DEBUG command is an internal command. It is meant to be used for
 /// developing and testing Redis.
-pub async fn debug(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    match String::from_utf8_lossy(&args[1]).to_lowercase().as_str() {
-        "object" => Ok(conn.db().debug(try_get_arg!(args, 2))?.into()),
+pub async fn debug(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let sub_command = args.pop_front().ok_or(Error::Syntax)?;
+    match String::from_utf8_lossy(&sub_command)
+        .to_lowercase()
+        .as_str()
+    {
+        "object" => Ok(conn
+            .db()
+            .debug(&(args.pop_front().ok_or(Error::Syntax)?))?
+            .into()),
         "set-active-expire" => Ok(Value::Ok),
-        "digest-value" => Ok(Value::Array(conn.db().digest(&args[2..])?)),
+        "digest-value" => Ok(Value::Array(
+            conn.db().digest(&(args.into_iter().collect::<Vec<_>>()))?,
+        )),
         _ => Err(Error::Syntax),
     }
 }
 
 /// The INFO command returns information and statistics about the server in a
 /// format that is simple to parse by computers and easy to read by humans.
-pub async fn info(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn info(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     let connections = conn.all_connections();
     Ok(Value::Blob(
         format!(
@@ -89,19 +100,18 @@ pub async fn info(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
             connections.total_connections(),
             connections.total_blocked_connections(),
         )
-        .as_str()
         .into(),
     ))
 }
 
 /// Delete all the keys of the currently selected DB. This command never fails.
-pub async fn flushdb(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn flushdb(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().flushdb()
 }
 
 /// Delete all the keys of all the existing databases, not just the currently
 /// selected one. This command never fails.
-pub async fn flushall(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn flushall(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.all_connections()
         .get_databases()
         .into_iter()
@@ -112,7 +122,7 @@ pub async fn flushall(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
 }
 
 /// Return the number of keys in the currently-selected database.
-pub async fn dbsize(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn dbsize(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().len().map(|s| s.into())
 }
 
@@ -120,7 +130,7 @@ pub async fn dbsize(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
 /// Unix timestamp and the amount of microseconds already elapsed in the current
 /// second. Basically the interface is very similar to the one of the
 /// gettimeofday system call.
-pub async fn time(_conn: &Connection, _args: &[Bytes]) -> Result<Value, Error> {
+pub async fn time(_conn: &Connection, _args: VecDeque<Bytes>) -> Result<Value, Error> {
     let now = SystemTime::now();
     let since_the_epoch = now.duration_since(UNIX_EPOCH).expect("Time went backwards");
     let seconds = format!("{}", since_the_epoch.as_secs());
@@ -131,7 +141,7 @@ pub async fn time(_conn: &Connection, _args: &[Bytes]) -> Result<Value, Error> {
 
 /// Ask the server to close the connection. The connection is closed as soon as
 /// all pending replies have been written to the client.
-pub async fn quit(_: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn quit(_: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     Err(Error::Quit)
 }
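
The TIME handler above replies with two strings, the whole seconds and the microseconds elapsed within the current second; the split itself is plain `std::time`:

```rust
use std::time::{SystemTime, UNIX_EPOCH};

fn main() {
    let since_the_epoch = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("Time went backwards");
    // Whole seconds since the epoch, plus the microseconds elapsed within
    // the current second.
    let seconds = format!("{}", since_the_epoch.as_secs());
    let micros = format!("{}", since_the_epoch.subsec_micros());
    println!("{} {}", seconds, micros);
}
```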
 

+ 99 - 81
src/cmd/set.rs

@@ -2,16 +2,19 @@
 use crate::{connection::Connection, error::Error, value::bytes_to_number, value::Value};
 use bytes::Bytes;
 use rand::Rng;
-use std::{cmp::min, collections::HashSet};
+use std::{
+    cmp::min,
+    collections::{HashSet, VecDeque},
+};
 
-fn store(conn: &Connection, key: &Bytes, values: &[Value]) -> i64 {
+fn store_key_values(conn: &Connection, key: Bytes, values: Vec<Value>) -> i64 {
     #[allow(clippy::mutable_key_type)]
     let mut x = HashSet::new();
     let mut len = 0;
 
-    for val in values.iter() {
+    for val in values.into_iter() {
         if let Value::Blob(blob) = val {
-            if x.insert(blob.clone().freeze()) {
+            if x.insert(blob) {
                 len += 1;
             }
         }
@@ -20,17 +23,22 @@ fn store(conn: &Connection, key: &Bytes, values: &[Value]) -> i64 {
     len
 }
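
The dropped `.freeze()` above reflects that `Value::Blob` now carries `Bytes` directly, while the read-write variant (`Value::BlobRw`, handled later in `getrange`) keeps `BytesMut` and is frozen on demand. A minimal sketch of that two-variant shape, using a hypothetical stand-in enum rather than the crate's `Value`:

```rust
use bytes::{Bytes, BytesMut};

// Hypothetical stand-in: read-only blobs hold Bytes, writable blobs hold
// BytesMut, and a writable blob can be frozen into a read-only one without
// copying the data.
enum Blob {
    ReadOnly(Bytes),
    ReadWrite(BytesMut),
}

impl Blob {
    fn freeze(self) -> Bytes {
        match self {
            Blob::ReadOnly(b) => b,
            Blob::ReadWrite(b) => b.freeze(),
        }
    }
}

fn main() {
    let mut rw = BytesMut::new();
    rw.extend_from_slice(b"hello world");
    assert_eq!(Blob::ReadWrite(rw).freeze(), Bytes::from("hello world"));
    assert_eq!(Blob::ReadOnly(Bytes::from("x")).freeze(), Bytes::from("x"));
}
```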
 
-async fn compare_sets<F1>(conn: &Connection, keys: &[Bytes], op: F1) -> Result<Value, Error>
+async fn compare_sets<F1>(
+    conn: &Connection,
+    mut keys: VecDeque<Bytes>,
+    op: F1,
+) -> Result<Value, Error>
 where
     F1: Fn(&mut HashSet<Bytes>, &HashSet<Bytes>) -> bool,
 {
+    let top_key = keys.pop_front().ok_or(Error::Syntax)?;
     conn.db().get_map_or(
-        &keys[0],
+        &top_key,
         |v| match v {
             Value::Set(x) => {
                 #[allow(clippy::mutable_key_type)]
                 let mut all_entries = x.read().clone();
-                for key in keys[1..].iter() {
+                for key in keys.iter() {
                     let mut do_break = false;
                     let mut found = false;
                     let _ = conn.db().get_map_or(
@@ -66,7 +74,7 @@ where
         || {
             #[allow(clippy::mutable_key_type)]
             let mut all_entries: HashSet<Bytes> = HashSet::new();
-            for key in keys[1..].iter() {
+            for key in keys.iter() {
                 let mut do_break = false;
                 let _ = conn.db().get_map_or(
                     key,
@@ -98,17 +106,19 @@ where
 /// Add the specified members to the set stored at key. Specified members that are already a member
 /// of this set are ignored. If key does not exist, a new set is created before adding the
 /// specified members.
-pub async fn sadd(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn sadd(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let key_for_not_found = key.clone();
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::Set(x) => {
                 let mut x = x.write();
 
                 let mut len = 0;
 
-                for val in (&args[2..]).iter() {
-                    if x.insert(val.clone()) {
+                for val in args.clone().into_iter() {
+                    if x.insert(val) {
                         len += 1;
                     }
                 }
@@ -122,27 +132,26 @@ pub async fn sadd(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
             let mut x = HashSet::new();
             let mut len = 0;
 
-            for val in (&args[2..]).iter() {
-                if x.insert(val.clone()) {
+            for val in args.clone().into_iter() {
+                if x.insert(val) {
                     len += 1;
                 }
             }
 
-            conn.db().set(&args[1], x.into(), None);
-
+            conn.db().set(key_for_not_found, x.into(), None);
             Ok(len.into())
         },
     )?;
 
-    conn.db().bump_version(&args[1]);
+    conn.db().bump_version(&key);
 
     Ok(result)
 }
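
SADD's reply only counts values that were not already present, which is exactly what the boolean returned by `HashSet::insert` encodes. A standalone sketch on plain `Bytes` values:

```rust
use bytes::Bytes;
use std::collections::HashSet;

// Count only the values that were newly inserted into the set.
fn added_count(values: Vec<Bytes>, set: &mut HashSet<Bytes>) -> usize {
    let mut len = 0;
    for val in values.into_iter() {
        if set.insert(val) {
            len += 1;
        }
    }
    len
}

fn main() {
    let mut set = HashSet::new();
    let first = added_count(
        vec![Bytes::from("a"), Bytes::from("b"), Bytes::from("a")],
        &mut set,
    );
    assert_eq!(first, 2);
    // Re-adding an existing member is ignored.
    assert_eq!(added_count(vec![Bytes::from("a")], &mut set), 0);
}
```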
 
 /// Returns the set cardinality (number of elements) of the set stored at key.
-pub async fn scard(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn scard(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Set(x) => Ok(x.read().len().into()),
             _ => Err(Error::WrongType),
@@ -155,8 +164,8 @@ pub async fn scard(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// successive sets.
 ///
 /// Keys that do not exist are considered to be empty sets.
-pub async fn sdiff(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    compare_sets(conn, &args[1..], |all_entries, elements| {
+pub async fn sdiff(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    compare_sets(conn, args, |all_entries, elements| {
         for element in elements.iter() {
             if all_entries.contains(element) {
                 all_entries.remove(element);
@@ -171,12 +180,13 @@ pub async fn sdiff(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// destination.
 ///
 /// If destination already exists, it is overwritten.
-pub async fn sdiffstore(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    if let Value::Array(values) = sdiff(conn, &args[1..]).await? {
+pub async fn sdiffstore(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key_name = args.pop_front().ok_or(Error::Syntax)?;
+    if let Value::Array(values) = sdiff(conn, args).await? {
         if values.len() > 0 {
-            Ok(store(conn, &args[1], &values).into())
+            Ok(store_key_values(conn, key_name, values).into())
         } else {
-            let _ = conn.db().del(&[args[1].clone()]);
+            let _ = conn.db().del(&[key_name]);
             Ok(0.into())
         }
     } else {
@@ -189,8 +199,8 @@ pub async fn sdiffstore(conn: &Connection, args: &[Bytes]) -> Result<Value, Erro
 /// Keys that do not exist are considered to be empty sets. With one of the keys being an empty
 /// set, the resulting set is also empty (since set intersection with an empty set always results
 /// in an empty set).
-pub async fn sinter(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    compare_sets(conn, &args[1..], |all_entries, elements| {
+pub async fn sinter(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    compare_sets(conn, args, |all_entries, elements| {
         all_entries.retain(|element| elements.contains(element));
 
         for element in elements.iter() {
@@ -211,7 +221,7 @@ pub async fn sinter(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// Keys that do not exist are considered to be empty sets. With one of the keys being an empty
 /// set, the resulting set is also empty (since set intersection with an empty set always results
 /// in an empty set).
-pub async fn sintercard(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn sintercard(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     if let Ok(Value::Array(x)) = sinter(conn, args).await {
         Ok(x.len().into())
     } else {
@@ -223,12 +233,13 @@ pub async fn sintercard(conn: &Connection, args: &[Bytes]) -> Result<Value, Erro
 /// destination.
 ///
 /// If destination already exists, it is overwritten.
-pub async fn sinterstore(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    if let Value::Array(values) = sinter(conn, &args[1..]).await? {
+pub async fn sinterstore(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key_name = args.pop_front().ok_or(Error::Syntax)?;
+    if let Value::Array(values) = sinter(conn, args).await? {
         if values.len() > 0 {
-            Ok(store(conn, &args[1], &values).into())
+            Ok(store_key_values(conn, key_name, values).into())
         } else {
-            let _ = conn.db().del(&[args[1].clone()]);
+            let _ = conn.db().del(&[key_name]);
             Ok(0.into())
         }
     } else {
@@ -237,12 +248,12 @@ pub async fn sinterstore(conn: &Connection, args: &[Bytes]) -> Result<Value, Err
 }
 
 /// Returns if member is a member of the set stored at key.
-pub async fn sismember(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn sismember(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Set(x) => {
-                if x.read().contains(&args[2]) {
+                if x.read().contains(&args[1]) {
                     Ok(1.into())
                 } else {
                     Ok(0.into())
@@ -257,9 +268,9 @@ pub async fn sismember(conn: &Connection, args: &[Bytes]) -> Result<Value, Error
 /// Returns all the members of the set value stored at key.
 ///
 /// This has the same effect as running SINTER with one argument key.
-pub async fn smembers(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn smembers(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Set(x) => Ok(x
                 .read()
@@ -277,13 +288,14 @@ pub async fn smembers(conn: &Connection, args: &[Bytes]) -> Result<Value, Error>
 ///
 /// For every member, 1 is returned if the value is a member of the set, or 0 if the element is not
 /// a member of the set or if key does not exist.
-pub async fn smismember(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn smismember(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
     conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::Set(x) => {
                 let x = x.read();
-                Ok((&args[2..])
+                Ok(args
                     .iter()
                     .map(|member| if x.contains(member) { 1 } else { 0 })
                     .collect::<Vec<i32>>()
@@ -291,13 +303,7 @@ pub async fn smismember(conn: &Connection, args: &[Bytes]) -> Result<Value, Erro
             }
             _ => Err(Error::WrongType),
         },
-        || {
-            Ok((&args[2..])
-                .iter()
-                .map(|_| 0.into())
-                .collect::<Vec<Value>>()
-                .into())
-        },
+        || Ok(args.iter().map(|_| 0.into()).collect::<Vec<Value>>().into()),
     )
 }
 
@@ -309,26 +315,33 @@ pub async fn smismember(conn: &Connection, args: &[Bytes]) -> Result<Value, Erro
 /// performed and 0 is returned. Otherwise, the element is removed from the source set and added to
 /// the destination set. When the specified element already exists in the destination set, it is
 /// only removed from the source set.
-pub async fn smove(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+///
+/// TODO: FIXME: This implementation is flaky. It should be rewritten to use a new db
+/// method that allows returning multiple keys, even if they are stored in the
+/// same bucket. Right now, this can block a connection.
+pub async fn smove(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let source = args.pop_front().ok_or(Error::Syntax)?;
+    let destination = args.pop_front().ok_or(Error::Syntax)?;
+    let member = args.pop_front().ok_or(Error::Syntax)?;
     let result = conn.db().get_map_or(
-        &args[1],
+        &source,
         |v| match v {
             Value::Set(set1) => conn.db().get_map_or(
-                &args[2],
+                &destination,
                 |v| match v {
                     Value::Set(set2) => {
                         let mut set1 = set1.write();
-                        if !set1.contains(&args[3]) {
+                        if !set1.contains(&member) {
                             return Ok(0.into());
                         }
 
-                        if args[1] == args[2] {
+                        if source == destination {
                             return Ok(1.into());
                         }
 
                         let mut set2 = set2.write();
-                        set1.remove(&args[3]);
-                        if set2.insert(args[3].clone()) {
+                        set1.remove(&member);
+                        if set2.insert(member.clone()) {
                             Ok(1.into())
                         } else {
                             Ok(0.into())
@@ -337,11 +350,11 @@ pub async fn smove(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
                     _ => Err(Error::WrongType),
                 },
                 || {
-                    set1.write().remove(&args[3]);
+                    set1.write().remove(&member);
                     #[allow(clippy::mutable_key_type)]
                     let mut x = HashSet::new();
-                    x.insert(args[3].clone());
-                    conn.db().set(&args[2], x.into(), None);
+                    x.insert(member.clone());
+                    conn.db().set(destination.clone(), x.into(), None);
                     Ok(1.into())
                 },
             ),
@@ -350,8 +363,10 @@ pub async fn smove(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
         || Ok(0.into()),
     )?;
 
-    conn.db().bump_version(&args[1]);
-    conn.db().bump_version(&args[3]);
+    if result == Value::Integer(1) {
+        conn.db().bump_version(&source);
+        conn.db().bump_version(&destination);
+    }
 
     Ok(result)
 }
@@ -364,11 +379,12 @@ pub async fn smove(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// By default, the command pops a single member from the set. When provided with the optional
 /// count argument, the reply will consist of up to count members, depending on the set's
 /// cardinality.
-pub async fn spop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let rand = srandmember(conn, args).await?;
+pub async fn spop(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let rand = srandmember(conn, args.clone()).await?;
+    let key = args.pop_front().ok_or(Error::Syntax)?;
     let mut should_remove = false;
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::Set(x) => {
                 let mut x = x.write();
@@ -395,9 +411,9 @@ pub async fn spop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
     )?;
 
     if should_remove {
-        let _ = conn.db().del(&[args[1].clone()]);
+        let _ = conn.db().del(&[key]);
     } else {
-        conn.db().bump_version(&args[1]);
+        conn.db().bump_version(&key);
     }
 
     Ok(result)
@@ -412,9 +428,9 @@ pub async fn spop(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// If called with a negative count, the behavior changes and the command is allowed to return the
 /// same element multiple times. In this case, the number of returned elements is the absolute
 /// value of the specified count.
-pub async fn srandmember(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn srandmember(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db().get_map_or(
-        &args[1],
+        &args[0],
         |v| match v {
             Value::Set(x) => {
                 let mut rng = rand::thread_rng();
@@ -427,7 +443,7 @@ pub async fn srandmember(conn: &Connection, args: &[Bytes]) -> Result<Value, Err
 
                 items.sort_by(|a, b| a.1.cmp(&b.1));
 
-                if args.len() == 2 {
+                if args.len() == 1 {
                     // No count argument was provided: return a single member, or Null if the set is empty
                     if items.is_empty() {
                         Ok(Value::Null)
@@ -439,7 +455,7 @@ pub async fn srandmember(conn: &Connection, args: &[Bytes]) -> Result<Value, Err
                     if items.is_empty() {
                         return Ok(Value::Array(vec![]));
                     }
-                    let len = bytes_to_number::<i64>(&args[2])?;
+                    let len = bytes_to_number::<i64>(&args[1])?;
 
                     if len > 0 {
                         // required length is positive, return *up* to the requested number and no duplicates allowed
@@ -472,7 +488,7 @@ pub async fn srandmember(conn: &Connection, args: &[Bytes]) -> Result<Value, Err
             _ => Err(Error::WrongType),
         },
         || {
-            Ok(if args.len() == 2 {
+            Ok(if args.len() == 1 {
                 Value::Null
             } else {
                 Value::Array(vec![])
@@ -484,19 +500,20 @@ pub async fn srandmember(conn: &Connection, args: &[Bytes]) -> Result<Value, Err
 /// Remove the specified members from the set stored at key. Specified members that are not a
 /// member of this set are ignored. If key does not exist, it is treated as an empty set and this
 /// command returns 0.
-pub async fn srem(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn srem(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
     let result = conn.db().get_map_or(
-        &args[1],
+        &key,
         |v| match v {
             Value::Set(x) => {
                 let mut set = x.write();
                 let mut i = 0;
 
-                for value in (&args[2..]).iter() {
-                    if set.remove(value) {
+                args.into_iter().for_each(|value| {
+                    if set.remove(&value) {
                         i += 1;
                     }
-                }
+                });
 
                 Ok(i.into())
             }
@@ -505,14 +522,14 @@ pub async fn srem(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
         || Ok(0.into()),
     )?;
 
-    conn.db().bump_version(&args[1]);
+    conn.db().bump_version(&key);
 
     Ok(result)
 }
 
 /// Returns the members of the set resulting from the union of all the given sets.
-pub async fn sunion(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    compare_sets(conn, &args[1..], |all_entries, elements| {
+pub async fn sunion(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    compare_sets(conn, args, |all_entries, elements| {
         for element in elements.iter() {
             all_entries.insert(element.clone());
         }
@@ -526,12 +543,13 @@ pub async fn sunion(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// destination.
 ///
 /// If destination already exists, it is overwritten.
-pub async fn sunionstore(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    if let Value::Array(values) = sunion(conn, &args[1..]).await? {
+pub async fn sunionstore(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key_name = args.pop_front().ok_or(Error::Syntax)?;
+    if let Value::Array(values) = sunion(conn, args).await? {
         if values.len() > 0 {
-            Ok(store(conn, &args[1], &values).into())
+            Ok(store_key_values(conn, key_name, values).into())
         } else {
-            let _ = conn.db().del(&[args[1].clone()]);
+            let _ = conn.db().del(&[key_name]);
             Ok(0.into())
         }
     } else {

+ 134 - 121
src/cmd/string.rs

@@ -11,7 +11,9 @@ use crate::{
 use bytes::Bytes;
 use std::{
     cmp::min,
+    collections::VecDeque,
     convert::TryInto,
+    f32::consts::E,
     ops::{Bound, Neg},
 };
 use tokio::time::Duration;
@@ -19,37 +21,37 @@ use tokio::time::Duration;
 /// If key already exists and is a string, this command appends the value at the
 /// end of the string. If key does not exist it is created and set as an empty
 /// string, so APPEND will be similar to SET in this special case.
-pub async fn append(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    conn.db().append(&args[1], &args[2])
+pub async fn append(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    conn.db().append(&args[0], &args[1])
 }
 
 /// Increments the number stored at key by one. If the key does not exist, it is set to 0 before
 /// performing the operation. An error is returned if the key contains a value of the wrong type or
 /// contains a string that can not be represented as integer. This operation is limited to 64 bit
 /// signed integers.
-pub async fn incr(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    conn.db().incr(&args[1], 1_i64).map(|n| n.into())
+pub async fn incr(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    conn.db().incr(&args[0], 1_i64).map(|n| n.into())
 }
 
 /// Increments the number stored at key by increment. If the key does not exist, it is set to 0
 /// before performing the operation. An error is returned if the key contains a value of the wrong
 /// type or contains a string that can not be represented as integer. This operation is limited to
 /// 64 bit signed integers.
-pub async fn incr_by(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let by: i64 = bytes_to_number(&args[2])?;
-    conn.db().incr(&args[1], by).map(|n| n.into())
+pub async fn incr_by(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let by: i64 = bytes_to_number(&args[1])?;
+    conn.db().incr(&args[0], by).map(|n| n.into())
 }
 
 /// Increment the string representing a floating point number stored at key by the specified
 /// increment. By using a negative increment value, the result is that the value stored at the key
 /// is decremented (by the obvious properties of addition). If the key does not exist, it is set to
 /// 0 before performing the operation.
-pub async fn incr_by_float(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let by = bytes_to_number::<Float>(&args[2])?;
+pub async fn incr_by_float(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let by = bytes_to_number::<Float>(&args[1])?;
     if by.is_infinite() || by.is_nan() {
         return Err(Error::IncrByInfOrNan);
     }
-    conn.db().incr(&args[1], by).map(|f| {
+    conn.db().incr(&args[0], by).map(|f| {
         if f.fract() == 0.0 {
             (*f as i64).into()
         } else {
@@ -62,52 +64,52 @@ pub async fn incr_by_float(conn: &Connection, args: &[Bytes]) -> Result<Value, E
 /// performing the operation. An error is returned if the key contains a value of the wrong type or
 /// contains a string that can not be represented as integer. This operation is limited to 64 bit
 /// signed integers.
-pub async fn decr(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    conn.db().incr(&args[1], -1_i64).map(|n| n.into())
+pub async fn decr(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    conn.db().incr(&args[0], -1_i64).map(|n| n.into())
 }
 
 /// Decrements the number stored at key by decrement. If the key does not exist, it is set to 0
 /// before performing the operation. An error is returned if the key contains a value of the wrong
 /// type or contains a string that can not be represented as integer. This operation is limited to
 /// 64 bit signed integers.
-pub async fn decr_by(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let by: i64 = (&Value::new(&args[2])).try_into()?;
-    conn.db().incr(&args[1], by.neg()).map(|n| n.into())
+pub async fn decr_by(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let by: i64 = (&Value::new(&args[1])).try_into()?;
+    conn.db().incr(&args[0], by.neg()).map(|n| n.into())
 }
 
 /// Get the value of key. If the key does not exist the special value nil is returned. An error is
 /// returned if the value stored at key is not a string, because GET only handles string values.
-pub async fn get(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().get(&args[1]))
+pub async fn get(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    Ok(conn.db().get(&args[0]))
 }
 
 /// Get the value of key and optionally set its expiration. GETEX is similar to
 /// GET, but is a write command with additional options.
-pub async fn getex(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn getex(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     let (expires_in, persist) = match args.len() {
-        2 => (None, false),
-        3 => {
-            if check_arg!(args, 2, "PERSIST") {
+        1 => (None, false),
+        2 => {
+            if check_arg!(args, 1, "PERSIST") {
                 (None, true)
             } else {
                 return Err(Error::Syntax);
             }
         }
-        4 => match String::from_utf8_lossy(&args[2]).to_uppercase().as_str() {
+        3 => match String::from_utf8_lossy(&args[1]).to_uppercase().as_str() {
             "EX" => (
-                Some(Expiration::new(&args[3], false, false, &args[0])?),
+                Some(Expiration::new(&args[2], false, false, b"GETEX")?),
                 false,
             ),
             "PX" => (
-                Some(Expiration::new(&args[3], true, false, &args[0])?),
+                Some(Expiration::new(&args[2], true, false, b"GETEX")?),
                 false,
             ),
             "EXAT" => (
-                Some(Expiration::new(&args[3], false, true, &args[0])?),
+                Some(Expiration::new(&args[2], false, true, b"GETEX")?),
                 false,
             ),
             "PXAT" => (
-                Some(Expiration::new(&args[3], true, true, &args[0])?),
+                Some(Expiration::new(&args[2], true, true, b"GETEX")?),
                 false,
             ),
             "PERSIST" => (None, Default::default()),
@@ -116,7 +118,7 @@ pub async fn getex(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
         _ => return Err(Error::Syntax),
     };
     Ok(conn.db().getex(
-        &args[1],
+        &args[0],
         expires_in.map(|t| t.try_into()).transpose()?,
         persist,
     ))
@@ -124,131 +126,129 @@ pub async fn getex(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 
 /// Get the value of key. If the key does not exist the special value nil is returned. An error is
 /// returned if the value stored at key is not a string, because GET only handles string values.
-pub async fn getrange(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    match conn.db().get(&args[1]) {
-        Value::Blob(binary) => {
-            let start = bytes_to_number::<i64>(&args[2])?;
-            let end = bytes_to_number::<i64>(&args[3])?;
-            let len = binary.len();
-
-            // resolve negative positions
-            let start: usize = if start < 0 {
-                (start + len as i64).try_into().unwrap_or(0)
-            } else {
-                start.try_into().expect("Positive number")
-            };
-
-            // resolve negative positions
-            let end: usize = if end < 0 {
-                if let Ok(val) = (end + len as i64).try_into() {
-                    val
-                } else {
-                    return Ok("".into());
-                }
-            } else {
-                end.try_into().expect("Positive number")
-            };
-            let end = min(end, len.checked_sub(1).unwrap_or_default());
+pub async fn getrange(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let bytes = match conn.db().get(&args[0]) {
+        Value::Blob(binary) => binary,
+        Value::BlobRw(binary) => binary.freeze(),
+        Value::Null => return Ok("".into()),
+        _ => return Err(Error::WrongType),
+    };
 
-            if end < start {
-                return Ok("".into());
-            }
+    let start = bytes_to_number::<i64>(&args[1])?;
+    let end = bytes_to_number::<i64>(&args[2])?;
+    let len = bytes.len();
+
+    // resolve negative positions
+    let start: usize = if start < 0 {
+        (start + len as i64).try_into().unwrap_or(0)
+    } else {
+        start.try_into().expect("Positive number")
+    };
 
-            Ok(Value::Blob(
-                binary
-                    .freeze()
-                    .slice((Bound::Included(start), Bound::Included(end)))
-                    .as_ref()
-                    .into(),
-            ))
+    // resolve negative positions
+    let end: usize = if end < 0 {
+        if let Ok(val) = (end + len as i64).try_into() {
+            val
+        } else {
+            return Ok("".into());
         }
-        Value::Null => Ok("".into()),
-        _ => Err(Error::WrongType),
+    } else {
+        end.try_into().expect("Positive number")
+    };
+    let end = min(end, len.checked_sub(1).unwrap_or_default());
+
+    if end < start {
+        return Ok("".into());
     }
+
+    Ok(Value::Blob(
+        bytes.slice((Bound::Included(start), Bound::Included(end))),
+    ))
 }
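
The offset handling in GETRANGE, shown on its own: negative offsets count from the end of the string and the end offset is clamped to the last byte before slicing with inclusive bounds (the handler above first freezes a `BlobRw` into `Bytes`):

```rust
use bytes::Bytes;
use std::cmp::min;
use std::ops::Bound;

// Resolve negative offsets, clamp the end, and slice with inclusive bounds.
fn getrange(bytes: &Bytes, start: i64, end: i64) -> Bytes {
    let len = bytes.len() as i64;
    if len == 0 {
        return Bytes::new();
    }
    let start = if start < 0 { (start + len).max(0) } else { start };
    let end = if end < 0 { end + len } else { end };
    let end = min(end, len - 1);
    if end < start || start >= len {
        return Bytes::new();
    }
    bytes.slice((Bound::Included(start as usize), Bound::Included(end as usize)))
}

fn main() {
    let value = Bytes::from("This is a string");
    assert_eq!(getrange(&value, 0, 3), Bytes::from("This"));
    assert_eq!(getrange(&value, -3, -1), Bytes::from("ing"));
    assert_eq!(getrange(&value, 10, 100), Bytes::from("string"));
}
```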
 
 /// Get the value of key and delete the key. This command is similar to GET, except for the fact
 /// that it also deletes the key on success (if and only if the key's value type is a string).
-pub async fn getdel(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().getdel(&args[1]))
+pub async fn getdel(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    Ok(conn.db().getdel(&args[0]))
 }
 
 /// Atomically sets key to value and returns the old value stored at key. Returns an error when key
 /// exists but does not hold a string value. Any previous time to live associated with the key is
 /// discarded on successful SET operation.
-pub async fn getset(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().getset(&args[1], Value::new(&args[2])))
+pub async fn getset(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    Ok(conn.db().getset(&args[0], Value::new(&args[1])))
 }
 
 /// Returns the values of all specified keys. For every key that does not hold a string value or
 /// does not exist, the special value nil is returned. Because of this, the operation never fails.
-pub async fn mget(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().get_multi(&args[1..]))
+pub async fn mget(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    Ok(conn.db().get_multi(args))
 }
 
 /// Set key to hold the string value. If key already holds a value, it is overwritten, regardless
 /// of its type. Any previous time to live associated with the key is discarded on successful SET
 /// operation.
-pub async fn set(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let len = args.len();
-    let mut i = 3;
+pub async fn set(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
     let mut expiration = None;
     let mut keep_ttl = false;
     let mut override_value = Override::Yes;
     let mut return_previous = false;
 
+    let command = b"SET";
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let value = args.pop_front().ok_or(Error::Syntax)?;
+
     loop {
-        if i >= len {
+        let arg = if let Some(arg) = args.pop_front() {
+            String::from_utf8_lossy(&arg).to_uppercase()
+        } else {
             break;
-        }
-        match String::from_utf8_lossy(&args[i]).to_uppercase().as_str() {
+        };
+
+        match arg.as_str() {
             "EX" => {
                 if expiration.is_some() {
                     return Err(Error::Syntax);
                 }
                 expiration = Some(Expiration::new(
-                    try_get_arg!(args, i + 1),
+                    &args.pop_front().ok_or(Error::Syntax)?,
                     false,
                     false,
-                    &args[0],
+                    command,
                 )?);
-                i += 1;
             }
             "PX" => {
                 if expiration.is_some() {
                     return Err(Error::Syntax);
                 }
                 expiration = Some(Expiration::new(
-                    try_get_arg!(args, i + 1),
+                    &args.pop_front().ok_or(Error::Syntax)?,
                     true,
                     false,
-                    &args[0],
+                    command,
                 )?);
-                i += 1;
             }
             "EXAT" => {
                 if expiration.is_some() {
                     return Err(Error::Syntax);
                 }
                 expiration = Some(Expiration::new(
-                    try_get_arg!(args, i + 1),
+                    &args.pop_front().ok_or(Error::Syntax)?,
                     false,
                     true,
-                    &args[0],
+                    command,
                 )?);
-                i += 1;
             }
             "PXAT" => {
                 if expiration.is_some() {
                     return Err(Error::Syntax);
                 }
                 expiration = Some(Expiration::new(
-                    try_get_arg!(args, i + 1),
+                    &args.pop_front().ok_or(Error::Syntax)?,
                     true,
                     true,
-                    &args[0],
+                    command,
                 )?);
-                i += 1;
             }
             "KEEPTTL" => keep_ttl = true,
             "NX" => override_value = Override::No,
@@ -256,13 +256,11 @@ pub async fn set(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
             "GET" => return_previous = true,
             _ => return Err(Error::Syntax),
         }
-
-        i += 1;
     }
     Ok(
         match conn.db().set_advanced(
-            &args[1],
-            Value::new(&args[2]),
+            key,
+            Value::Blob(value),
             expiration.map(|t| t.try_into()).transpose()?,
             override_value,
             keep_ttl,
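
The option parsing above replaces the old index arithmetic (i, i + 1, try_get_arg!) with pop_front: each keyword and its value are consumed exactly once and moved out of the argument list. A reduced sketch of the same loop shape; SetOpts and the EX/PX handling are simplified stand-ins, not the crate's types:

use bytes::Bytes;
use std::collections::VecDeque;

#[derive(Debug, Default)]
struct SetOpts {
    // Simplified: a single millisecond expiration instead of the Expiration type.
    expire_ms: Option<u64>,
    keep_ttl: bool,
    nx: bool,
    xx: bool,
    get: bool,
}

fn parse_set_opts(mut args: VecDeque<Bytes>) -> Result<SetOpts, &'static str> {
    let mut opts = SetOpts::default();
    while let Some(arg) = args.pop_front() {
        match String::from_utf8_lossy(&arg).to_uppercase().as_str() {
            "EX" | "PX" => {
                // The keyword's value is simply the next owned argument.
                let raw = args.pop_front().ok_or("syntax error")?;
                let n: u64 = String::from_utf8_lossy(&raw)
                    .parse()
                    .map_err(|_| "value is not an integer")?;
                // Treat EX and PX alike here for brevity; the real code tracks the unit.
                opts.expire_ms = Some(n);
            }
            "KEEPTTL" => opts.keep_ttl = true,
            "NX" => opts.nx = true,
            "XX" => opts.xx = true,
            "GET" => opts.get = true,
            _ => return Err("syntax error"),
        }
    }
    Ok(opts)
}

fn main() {
    let args: VecDeque<Bytes> = vec!["EX", "10", "NX"].into_iter().map(Bytes::from).collect();
    let opts = parse_set_opts(args).unwrap();
    assert_eq!(opts.expire_ms, Some(10));
    assert!(opts.nx && !opts.xx && !opts.keep_ttl && !opts.get);
}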
@@ -282,11 +280,9 @@ pub async fn set(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 ///
 /// It is not possible for clients to see that some of the keys were
 /// updated while others are unchanged.
-pub async fn mset(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    conn.db().multi_set(&args[1..], true).map_err(|e| match e {
-        Error::Syntax => {
-            Error::WrongNumberArgument(String::from_utf8_lossy(&args[0]).to_uppercase())
-        }
+pub async fn mset(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    conn.db().multi_set(args, true).map_err(|e| match e {
+        Error::Syntax => Error::WrongNumberArgument("MSET".to_owned()),
         e => e,
     })
 }
@@ -301,11 +297,9 @@ pub async fn mset(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// MSETNX is atomic, so all given keys are set at once. It is not possible for
 /// clients to see that some of the keys were updated while others are
 /// unchanged.
-pub async fn msetnx(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    conn.db().multi_set(&args[1..], false).map_err(|e| match e {
-        Error::Syntax => {
-            Error::WrongNumberArgument(String::from_utf8_lossy(&args[0]).to_uppercase())
-        }
+pub async fn msetnx(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    conn.db().multi_set(args, false).map_err(|e| match e {
+        Error::Syntax => Error::WrongNumberArgument("MSETNX".to_owned()),
         e => e,
     })
 }

@@ -315,34 +309,51 @@ pub async fn msetnx(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 ///
 /// SET mykey value
 /// EXPIRE mykey seconds
-pub async fn setex(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    let is_milliseconds = check_arg!(args, 0, "PSETEX");
-
-    let expires_in = Expiration::new(&args[2], is_milliseconds, false, &args[0])?;
+#[inline]
+async fn setex_ex(
+    command: &[u8],
+    is_milliseconds: bool,
+    conn: &Connection,
+    mut args: VecDeque<Bytes>,
+) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let expiration = args.pop_front().ok_or(Error::Syntax)?;
+    let value = args.pop_front().ok_or(Error::Syntax)?;
+
+    let expires_in = Expiration::new(&expiration, is_milliseconds, false, command)?;
 
     Ok(conn
         .db()
-        .set(&args[1], Value::new(&args[3]), Some(expires_in.try_into()?)))
+        .set(key, Value::Blob(value), Some(expires_in.try_into()?)))
+}
+
+/// Set key to hold the string value and set key to timeout after a given number
+/// of seconds. This command is equivalent to SET followed by EXPIRE.
+pub async fn setex(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    setex_ex(b"SETEX", false, conn, args).await
+}
+
+/// PSETEX works exactly like SETEX with the sole difference that the expire
+/// time is specified in milliseconds instead of seconds.
+pub async fn psetex(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    setex_ex(b"PSETEX", true, conn, args).await
 }
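
Folding SETEX and PSETEX into one setex_ex helper removes the earlier sniffing of args[0] to decide the unit; each public command just fixes the is_milliseconds flag. The same shape in miniature, with std Duration standing in for the crate's Expiration type:

use std::time::Duration;

// Shared logic: parse the raw expiration and apply the caller's time unit.
fn expiration_from(raw: &str, is_milliseconds: bool) -> Result<Duration, &'static str> {
    let n: u64 = raw.parse().map_err(|_| "value is not an integer or out of range")?;
    Ok(if is_milliseconds {
        Duration::from_millis(n)
    } else {
        Duration::from_secs(n)
    })
}

// Thin wrappers keep the only command-specific detail (the unit) out of the
// shared code, mirroring setex()/psetex() delegating to setex_ex().
fn setex_like(raw: &str) -> Result<Duration, &'static str> {
    expiration_from(raw, false)
}

fn psetex_like(raw: &str) -> Result<Duration, &'static str> {
    expiration_from(raw, true)
}

fn main() {
    assert_eq!(setex_like("2"), Ok(Duration::from_secs(2)));
    assert_eq!(psetex_like("2"), Ok(Duration::from_millis(2)));
}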
 
 /// Set key to hold string value if key does not exist. In that case, it is
 /// equal to SET. When key already holds a value, no operation is performed.
 /// SETNX is short for "SET if Not eXists".
-pub async fn setnx(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    Ok(conn.db().set_advanced(
-        &args[1],
-        Value::new(&args[2]),
-        None,
-        Override::No,
-        false,
-        false,
-    ))
+pub async fn setnx(conn: &Connection, mut args: VecDeque<Bytes>) -> Result<Value, Error> {
+    let key = args.pop_front().ok_or(Error::Syntax)?;
+    let value = args.pop_front().ok_or(Error::Syntax)?;
+    Ok(conn
+        .db()
+        .set_advanced(key, Value::Blob(value), None, Override::No, false, false))
 }
 
 /// Returns the length of the string value stored at key. An error is returned when key holds a
 /// non-string value.
-pub async fn strlen(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
-    match conn.db().get(&args[1]) {
+pub async fn strlen(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
+    match conn.db().get(&args[0]) {
         Value::Blob(x) => Ok(x.len().into()),
         Value::String(x) => Ok(x.len().into()),
         Value::Null => Ok(0.into()),
@@ -356,9 +367,9 @@ pub async fn strlen(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// make offset fit. Non-existing keys are considered as empty strings, so this
 /// command will make sure it holds a string large enough to be able to set
 /// value at offset.
-pub async fn setrange(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn setrange(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.db()
-        .set_range(&args[1], bytes_to_number(&args[2])?, &args[3])
+        .set_range(&args[0], bytes_to_number(&args[1])?, &args[2])
 }
 
 #[cfg(test)]
@@ -741,7 +752,9 @@ mod test {
             run_command(&c, &["setrange", "foo", "30", "xxx"]).await,
         );
         assert_eq!(
-            Ok("\0\0xxx\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0xxx\0\0\0\0\0\0\0xxx".into()),
+            Ok(Value::BlobRw(
+                ("\0\0xxx\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0xxx\0\0\0\0\0\0\0xxx".into())
+            )),
             run_command(&c, &["get", "foo"]).await,
         );
     }

+ 20 - 16
src/cmd/transaction.rs

@@ -1,4 +1,6 @@
 //! # Transaction command handlers
+use std::collections::VecDeque;
+
 use crate::{
     connection::{Connection, ConnectionStatus},
     error::Error,
@@ -10,13 +12,13 @@ use bytes::Bytes;
 /// normal.
 ///
 /// If WATCH was used, DISCARD unwatches all keys watched by the connection
-pub async fn discard(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn discard(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.stop_transaction()
 }
 
 /// Marks the start of a transaction block. Subsequent commands will be queued for atomic execution
 /// using EXEC.
-pub async fn multi(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn multi(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.start_transaction()
 }
 
@@ -25,7 +27,7 @@ pub async fn multi(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
 ///
 /// When using WATCH, EXEC will execute commands only if the watched keys were not modified,
 /// allowing for a check-and-set mechanism.
-pub async fn exec(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn exec(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     match conn.status() {
         ConnectionStatus::Multi => Ok(()),
         ConnectionStatus::FailedTx => {
@@ -49,7 +51,7 @@ pub async fn exec(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
 
     if let Some(commands) = conn.get_queue_commands() {
         let dispatcher = conn.all_connections().get_dispatcher();
-        for args in commands.iter() {
+        for args in commands.into_iter() {
             let result = dispatcher
                 .execute(conn, args)
                 .await
@@ -65,15 +67,17 @@ pub async fn exec(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
 }
 
 /// Marks the given keys to be watched for conditional execution of a transaction.
-pub async fn watch(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
+pub async fn watch(conn: &Connection, args: VecDeque<Bytes>) -> Result<Value, Error> {
     if conn.status() == ConnectionStatus::Multi {
         return Err(Error::WatchInsideTx);
     }
     conn.watch_key(
-        &(&args[1..])
-            .iter()
-            .map(|key| (key, conn.db().get_version(key)))
-            .collect::<Vec<(&Bytes, u128)>>(),
+        args.into_iter()
+            .map(|key| {
+                let v = conn.db().get_version(&key);
+                (key, v)
+            })
+            .collect::<Vec<(Bytes, u128)>>(),
     );
     Ok(Value::Ok)
 }
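
WATCH now stores owned (Bytes, u128) pairs, so the connection keeps each key together with the version it observed; at EXEC time the transaction only proceeds if every watched version is unchanged. A small self-contained sketch of that check-and-set idea over a plain version map (a HashMap lookup stands in for Db::get_version):

use bytes::Bytes;
use std::collections::HashMap;

// Stand-in for the per-key version counter that bumps on every write.
type Versions = HashMap<Bytes, u128>;

// WATCH: snapshot the current version of every key, taking the keys by value.
fn snapshot(versions: &Versions, keys: Vec<Bytes>) -> Vec<(Bytes, u128)> {
    keys.into_iter()
        .map(|key| {
            let v = versions.get(&key).copied().unwrap_or(0);
            (key, v)
        })
        .collect()
}

// EXEC: the queued commands may run only if no watched key changed meanwhile.
fn still_valid(versions: &Versions, watched: &[(Bytes, u128)]) -> bool {
    watched
        .iter()
        .all(|(key, v)| versions.get(key).copied().unwrap_or(0) == *v)
}

fn main() {
    let mut versions = Versions::new();
    versions.insert(Bytes::from("foo"), 1);
    let watched = snapshot(&versions, vec![Bytes::from("foo")]);
    assert!(still_valid(&versions, &watched));
    versions.insert(Bytes::from("foo"), 2); // a write elsewhere bumps the version
    assert!(!still_valid(&versions, &watched));
}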
@@ -81,13 +85,15 @@ pub async fn watch(conn: &Connection, args: &[Bytes]) -> Result<Value, Error> {
 /// Flushes all the previously watched keys for a transaction.
 ///
 /// If you call EXEC or DISCARD, there's no need to manually call UNWATCH.
-pub async fn unwatch(conn: &Connection, _: &[Bytes]) -> Result<Value, Error> {
+pub async fn unwatch(conn: &Connection, _: VecDeque<Bytes>) -> Result<Value, Error> {
     conn.discard_watched_keys();
     Ok(Value::Ok)
 }
 
 #[cfg(test)]
 mod test {
+    use std::collections::VecDeque;
+
     use crate::dispatcher::Dispatcher;
     use crate::{
         cmd::test::{create_connection, run_command},
@@ -287,12 +293,10 @@ mod test {
     }
 
     fn get_keys(args: &[&str]) -> Vec<Bytes> {
-        let args: Vec<Bytes> = args.iter().map(|s| Bytes::from(s.to_string())).collect();
+        let args: VecDeque<Bytes> = args.iter().map(|s| Bytes::from(s.to_string())).collect();
         let d = Dispatcher::new();
-        if let Ok(cmd) = d.get_handler(&args) {
-            cmd.get_keys(&args).iter().map(|k| (*k).clone()).collect()
-        } else {
-            vec![]
-        }
+        d.get_handler(&args)
+            .map(|cmd| cmd.get_keys(&args, true))
+            .unwrap_or_default()
     }
 }

+ 19 - 13
src/connection/mod.rs

@@ -3,7 +3,10 @@ use self::pubsub_server::Pubsub;
 use crate::{db::Db, error::Error, value::Value};
 use bytes::Bytes;
 use parking_lot::RwLock;
-use std::{collections::HashSet, sync::Arc};
+use std::{
+    collections::{HashSet, VecDeque},
+    sync::Arc,
+};
 use tokio::sync::broadcast::{self, Receiver, Sender};
 
 pub mod connections;
@@ -51,7 +54,7 @@ pub struct ConnectionInfo {
     watch_keys: Vec<(Bytes, u128)>,
     tx_keys: HashSet<Bytes>,
     status: ConnectionStatus,
-    commands: Option<Vec<Vec<Bytes>>>,
+    commands: Option<Vec<VecDeque<Bytes>>>,
     is_blocked: bool,
     blocked_notification: Option<Sender<()>>,
     block_id: usize,
@@ -269,11 +272,11 @@ impl Connection {
 
     /// Watches keys. In a transaction watched keys are a mechanism to discard a transaction if
     /// some value changed since the moment the command was queued until the execution time.
-    pub fn watch_key(&self, keys: &[(&Bytes, u128)]) {
+    pub fn watch_key(&self, keys: Vec<(Bytes, u128)>) {
         let watch_keys = &mut self.info.write().watch_keys;
-        keys.iter()
-            .map(|(bytes, version)| {
-                watch_keys.push(((*bytes).clone(), *version));
+        keys.into_iter()
+            .map(|value| {
+                watch_keys.push(value);
             })
             .for_each(drop);
     }
@@ -312,27 +315,30 @@ impl Connection {
     }
 
     /// Queues a command for later execution
-    pub fn queue_command(&self, args: &[Bytes]) {
+    pub fn queue_command(&self, args: VecDeque<Bytes>) {
         let mut info = self.info.write();
         let commands = info.commands.get_or_insert(vec![]);
-        commands.push(args.iter().map(|m| (*m).clone()).collect());
+        commands.push(args);
     }
 
     /// Returns a list of queued commands.
-    pub fn get_queue_commands(&self) -> Option<Vec<Vec<Bytes>>> {
+    pub fn get_queue_commands(&self) -> Option<Vec<VecDeque<Bytes>>> {
         let mut info = self.info.write();
         info.watch_keys = vec![];
         info.status = ConnectionStatus::ExecutingTx;
         info.commands.take()
     }
 
-    /// Returns a lsit of transaction keys
-    pub fn tx_keys(&self, keys: Vec<&Bytes>) {
+    /// Records the list of transaction keys
+    pub fn tx_keys<T>(&self, keys: T)
+    where
+        T: IntoIterator<Item = Bytes>,
+    {
         #[allow(clippy::mutable_key_type)]
         let tx_keys = &mut self.info.write().tx_keys;
-        keys.iter()
+        keys.into_iter()
             .map(|k| {
-                tx_keys.insert((*k).clone());
+                tx_keys.insert(k);
             })
             .for_each(drop);
     }
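
Making tx_keys generic over T: IntoIterator<Item = Bytes> lets callers hand over a Vec, a VecDeque, or any iterator of owned keys with no intermediate clone or collect. A standalone sketch of the same signature (TxState is an illustrative stand-in, not the crate's ConnectionInfo):

use bytes::Bytes;
use std::collections::HashSet;

#[derive(Default)]
struct TxState {
    tx_keys: HashSet<Bytes>,
}

impl TxState {
    // Accept any source of owned keys; every Bytes is moved into the set.
    fn tx_keys<T>(&mut self, keys: T)
    where
        T: IntoIterator<Item = Bytes>,
    {
        self.tx_keys.extend(keys);
    }
}

fn main() {
    let mut state = TxState::default();
    state.tx_keys(vec![Bytes::from("a"), Bytes::from("b")]);
    state.tx_keys(std::iter::once(Bytes::from("b"))); // duplicates collapse
    assert_eq!(state.tx_keys.len(), 2);
}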

+ 12 - 12
src/connection/pubsub_server.rs

@@ -2,10 +2,10 @@
 //!
 //! There is one instance of this mod active per server instance.
 use crate::{connection::Connection, error::Error, value::Value};
-use bytes::Bytes;
+use bytes::{Bytes, BytesMut};
 use glob::Pattern;
 use parking_lot::RwLock;
-use std::collections::HashMap;
+use std::collections::{HashMap, VecDeque};
 use tokio::sync::mpsc;
 
 type Sender = mpsc::Sender<Value>;
@@ -38,7 +38,7 @@ impl Pubsub {
     }
 
     /// Returns the number of subscribers for the given channels
-    pub fn get_number_of_subscribers(&self, channels: &[Bytes]) -> Vec<(Bytes, usize)> {
+    pub fn get_number_of_subscribers(&self, channels: &VecDeque<Bytes>) -> Vec<(Bytes, usize)> {
         let subscribers = self.subscriptions.read();
         let mut ret = vec![];
         for channel in channels.iter() {
@@ -53,11 +53,11 @@ impl Pubsub {
     }
 
     /// Subscribe to patterns
-    pub fn psubscribe(&self, channels: &[Bytes], conn: &Connection) -> Result<(), Error> {
+    pub fn psubscribe(&self, channels: VecDeque<Bytes>, conn: &Connection) -> Result<(), Error> {
         let mut subscriptions = self.psubscriptions.write();
 
-        for bytes_channel in channels.iter() {
-            let channel = String::from_utf8_lossy(bytes_channel);
+        for bytes_channel in channels.into_iter() {
+            let channel = String::from_utf8_lossy(&bytes_channel);
             let channel =
                 Pattern::new(&channel).map_err(|_| Error::InvalidPattern(channel.to_string()))?;
 
@@ -77,7 +77,7 @@ impl Pubsub {
             conn.append_response(
                 vec![
                     "psubscribe".into(),
-                    Value::new(&bytes_channel),
+                    Value::Blob(bytes_channel),
                     conn.pubsub_client().total_subs().into(),
                 ]
                 .into(),
@@ -161,14 +161,14 @@ impl Pubsub {
     }
 
     /// Subscribe connection to channels
-    pub fn subscribe(&self, channels: &[Bytes], conn: &Connection) {
+    pub fn subscribe(&self, channels: VecDeque<Bytes>, conn: &Connection) {
         let mut subscriptions = self.subscriptions.write();
         let total_psubs = self.psubscriptions.read().len();
 
         channels
-            .iter()
+            .into_iter()
             .map(|channel| {
-                if let Some(subs) = subscriptions.get_mut(channel) {
+                if let Some(subs) = subscriptions.get_mut(&channel) {
                     subs.insert(conn.id(), conn.pubsub_client().sender());
                 } else {
                     let mut h = HashMap::new();
@@ -176,11 +176,11 @@ impl Pubsub {
                     subscriptions.insert(channel.clone(), h);
                 }
 
-                conn.pubsub_client().new_subscription(channel);
+                conn.pubsub_client().new_subscription(&channel);
                 conn.append_response(
                     vec![
                         "subscribe".into(),
-                        Value::new(&channel),
+                        Value::Blob(channel),
                         conn.pubsub_client().total_subs().into(),
                     ]
                     .into(),

+ 1 - 0
src/db/entry.rs

@@ -94,6 +94,7 @@ impl Entry {
             &self.value,
             Value::Boolean(_)
                 | Value::Blob(_)
+                | Value::BlobRw(_)
                 | Value::BigInteger(_)
                 | Value::Integer(_)
                 | Value::Float(_)

+ 103 - 82
src/db/mod.rs

@@ -24,7 +24,7 @@ use parking_lot::{Mutex, RwLock};
 use rand::{prelude::SliceRandom, Rng};
 use seahash::hash;
 use std::{
-    collections::HashMap,
+    collections::{HashMap, VecDeque},
     convert::{TryFrom, TryInto},
     ops::{AddAssign, Deref},
     str::FromStr,
@@ -231,13 +231,12 @@ impl Db {
             .iter()
             .map(|key| {
                 let slot = self.slots[self.get_slot(key)].read();
-                Value::Blob(
+                Value::new(
                     slot.get(key)
                         .filter(|v| v.is_valid())
                         .map(|v| hex::encode(&v.value.digest()))
                         .unwrap_or("00000".into())
-                        .as_str()
-                        .into(),
+                        .as_bytes(),
                 )
             })
             .collect::<Vec<Value>>())
@@ -263,26 +262,25 @@ impl Db {
     }
 
     /// Round numbers to store them efficiently, especially float numbers. For instance `1.00` will be converted to `1`.
-    fn round_numbers<T>(number: T) -> BytesMut
+    fn round_numbers<T>(number: T) -> Bytes
     where
         T: ToString,
     {
         let number_to_str = number.to_string();
 
         if number_to_str.find('.').is_none() {
-            return number_to_str.as_bytes().into();
+            return Bytes::copy_from_slice(number_to_str.as_bytes());
         }
 
         let number_to_str = number_to_str
             .trim_end_matches(|c| c == '0' || c == '.')
             .to_string();
 
-        if number_to_str.is_empty() {
-            "0"
+        Bytes::copy_from_slice(if number_to_str.is_empty() {
+            b"0"
         } else {
-            number_to_str.as_str()
-        }
-        .into()
+            number_to_str.as_bytes()
+        })
     }
 
     // Converts a given number to a correct Value, it should be used with Self::round_numbers()
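
Because the rounded representation is never mutated afterwards, returning Bytes directly removes the BytesMut allocation plus the later .freeze() call at both call sites. A freestanding version of the same trimming rule, to show the intended behaviour:

use bytes::Bytes;

// Store numbers compactly: "1.00" becomes "1", "2.50" becomes "2.5".
fn round_numbers<T: ToString>(number: T) -> Bytes {
    let s = number.to_string();
    if !s.contains('.') {
        return Bytes::copy_from_slice(s.as_bytes());
    }
    let trimmed = s.trim_end_matches(|c| c == '0' || c == '.');
    if trimmed.is_empty() {
        Bytes::from_static(b"0")
    } else {
        Bytes::copy_from_slice(trimmed.as_bytes())
    }
}

fn main() {
    assert_eq!(round_numbers(1.00_f64), Bytes::from_static(b"1"));
    assert_eq!(round_numbers(2.50_f64), Bytes::from_static(b"2.5"));
    assert_eq!(round_numbers(7_i64), Bytes::from_static(b"7"));
}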
@@ -326,7 +324,7 @@ impl Db {
                         )
                         .ok_or(Error::Overflow)?;
                 }
-                let incr_by_bytes = Self::round_numbers(incr_by).freeze();
+                let incr_by_bytes = Self::round_numbers(incr_by);
                 h.insert(sub_key.clone(), incr_by_bytes.clone());
 
                 Self::number_to_value(&incr_by_bytes)
@@ -334,7 +332,7 @@ impl Db {
             None => {
                 #[allow(clippy::mutable_key_type)]
                 let mut h = HashMap::new();
-                let incr_by_bytes = Self::round_numbers(incr_by).freeze();
+                let incr_by_bytes = Self::round_numbers(incr_by);
                 h.insert(sub_key.clone(), incr_by_bytes.clone());
                 let _ = slot.insert(key.clone(), Entry::new(h.into(), None));
                 Self::number_to_value(&incr_by_bytes)
@@ -455,6 +453,14 @@ impl Db {
     /// value at offset.
     pub fn set_range(&self, key: &Bytes, offset: i128, data: &[u8]) -> Result<Value, Error> {
         let mut slot = self.slots[self.get_slot(key)].write();
+
+        if let Some(entry) = slot.get_mut(key).filter(|x| x.is_valid()) {
+            if let Value::Blob(data) = entry.get() {
+                let rw_data = BytesMut::from(&data[..]);
+                entry.change_value(Value::BlobRw(rw_data));
+            }
+        }
+
         let value = slot.get_mut(key).map(|value| {
             if !value.is_valid() {
                 self.expirations.lock().remove(key);
@@ -473,7 +479,7 @@ impl Db {
 
         let length = offset as usize + data.len();
         match value {
-            Some(Value::Blob(bytes)) => {
+            Some(Value::BlobRw(bytes)) => {
                 if bytes.capacity() < length {
                     bytes.resize(length, 0);
                 }
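
SETRANGE is the write path that triggers the one-way Blob to BlobRw upgrade: the read-only Bytes is copied once into a BytesMut, and from then on the value is resized and patched in place; pure reads never pay that cost. A sketch of just that upgrade step, outside of the Db/Entry machinery (Val is an illustrative two-variant enum, not the crate's Value):

use bytes::{Bytes, BytesMut};

enum Val {
    Blob(Bytes),      // read-only, cheap to clone and share
    BlobRw(BytesMut), // owned buffer that can be edited in place
}

// One-way upgrade: a Blob is copied once into a mutable buffer; a BlobRw is
// reused as-is. There is no path back from BlobRw to Blob.
fn upgrade(val: Val) -> BytesMut {
    match val {
        Val::Blob(b) => BytesMut::from(&b[..]),
        Val::BlobRw(b) => b,
    }
}

// Patch `data` into the value at `offset`, growing the buffer when needed.
fn set_range(val: Val, offset: usize, data: &[u8]) -> Val {
    let mut bytes = upgrade(val);
    let length = offset + data.len();
    if bytes.len() < length {
        bytes.resize(length, 0);
    }
    bytes[offset..length].copy_from_slice(data);
    Val::BlobRw(bytes)
}

fn main() {
    let v = set_range(Val::Blob(Bytes::from_static(b"hello")), 7, b"xx");
    match v {
        Val::BlobRw(b) => assert_eq!(&b[..], &b"hello\0\0xx"[..]),
        Val::Blob(_) => unreachable!(),
    }
}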
@@ -499,13 +505,13 @@ impl Db {
     /// Copies a key
     pub fn copy(
         &self,
-        source: &Bytes,
-        target: &Bytes,
+        source: Bytes,
+        target: Bytes,
         replace: Override,
         target_db: Option<Arc<Db>>,
     ) -> Result<bool, Error> {
-        let slot = self.slots[self.get_slot(source)].read();
-        let value = if let Some(value) = slot.get(source).filter(|x| x.is_valid()) {
+        let slot = self.slots[self.get_slot(&source)].read();
+        let value = if let Some(value) = slot.get(&source).filter(|x| x.is_valid()) {
             value.clone()
         } else {
             return Ok(false);
@@ -536,20 +542,20 @@ impl Db {
             if replace == Override::No && self.exists(&[target.clone()]) > 0 {
                 return Ok(false);
             }
-            let mut slot = self.slots[self.get_slot(target)].write();
-            slot.insert(target.clone(), value);
+            let mut slot = self.slots[self.get_slot(&target)].write();
+            slot.insert(target, value);
 
             Ok(true)
         }
     }
 
     /// Moves a given key between databases
-    pub fn move_key(&self, source: &Bytes, target_db: Arc<Db>) -> Result<bool, Error> {
+    pub fn move_key(&self, source: Bytes, target_db: Arc<Db>) -> Result<bool, Error> {
         if self.db_id == target_db.db_id {
             return Err(Error::SameEntry);
         }
-        let mut slot = self.slots[self.get_slot(source)].write();
-        let (expires_in, value) = if let Some(value) = slot.get(source).filter(|v| v.is_valid()) {
+        let mut slot = self.slots[self.get_slot(&source)].write();
+        let (expires_in, value) = if let Some(value) = slot.get(&source).filter(|v| v.is_valid()) {
             (
                 value.get_ttl().map(|t| t - Instant::now()),
                 value.value.clone(),
@@ -559,9 +565,16 @@ impl Db {
         };
 
         if Value::Integer(1)
-            == target_db.set_advanced(&source, value, expires_in, Override::No, false, false)
+            == target_db.set_advanced(
+                source.clone(),
+                value,
+                expires_in,
+                Override::No,
+                false,
+                false,
+            )
         {
-            slot.remove(source);
+            slot.remove(&source);
             Ok(true)
         } else {
             Ok(false)
@@ -810,7 +823,7 @@ impl Db {
     }
 
     /// Get multiple copies of entries
-    pub fn get_multi(&self, keys: &[Bytes]) -> Value {
+    pub fn get_multi(&self, keys: VecDeque<Bytes>) -> Value {
         keys.iter()
             .map(|key| {
                 let slot = self.slots[self.get_slot(key)].read();
@@ -846,8 +859,12 @@ impl Db {
         let mut entry = slot.get_mut(key).filter(|x| x.is_valid());
 
         if let Some(entry) = slot.get_mut(key).filter(|x| x.is_valid()) {
+            if let Value::Blob(data) = entry.get() {
+                let rw_data = BytesMut::from(&data[..]);
+                entry.change_value(Value::BlobRw(rw_data));
+            }
             match entry.get_mut() {
-                Value::Blob(value) => {
+                Value::BlobRw(value) => {
                     value.put(value_to_append.as_ref());
                     Ok(value.len().into())
                 }
@@ -865,18 +882,29 @@ impl Db {
     /// If override_all is set to false, all entries must be new entries or the
     /// entire operation fails, in this case `1` or `0` is returned. Otherwise `Ok` is
     /// returned.
-    pub fn multi_set(&self, key_values: &[Bytes], override_all: bool) -> Result<Value, Error> {
+    pub fn multi_set(
+        &self,
+        key_values: VecDeque<Bytes>,
+        override_all: bool,
+    ) -> Result<Value, Error> {
         if key_values.len() % 2 == 1 {
             return Err(Error::Syntax);
         }
 
-        let keys = key_values
-            .iter()
-            .step_by(2)
-            .cloned()
-            .collect::<Vec<Bytes>>();
+        let mut keys = vec![];
+        let mut values = vec![];
+
+        key_values.into_iter().enumerate().for_each(|(key, val)| {
+            if key % 2 == 0 {
+                keys.push(val);
+            } else {
+                values.push(val);
+            }
+        });
 
-        self.lock_keys(&keys);
+        let to_lock = keys.clone();
+
+        self.lock_keys(&to_lock);
 
         if !override_all {
             for key in keys.iter() {
@@ -888,15 +916,16 @@ impl Db {
             }
         }
 
-        for (i, _) in key_values.iter().enumerate().step_by(2) {
-            let mut slot = self.slots[self.get_slot(&key_values[i])].write();
-            slot.insert(
-                key_values[i].clone(),
-                Entry::new(Value::new(&key_values[i + 1]), None),
-            );
+        let mut values = values.into_iter();
+
+        for key in keys.into_iter() {
+            let mut slot = self.slots[self.get_slot(&key)].write();
+            if let Some(value) = values.next() {
+                slot.insert(key, Entry::new(Value::Blob(value), None));
+            }
         }
 
-        self.unlock_keys(&keys);
+        self.unlock_keys(&to_lock);
 
         if override_all {
             Ok(Value::Ok)
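
multi_set now consumes the flat key/value argument list and splits it into owned keys and values, so the keys can double as the lock set without borrowing from the original slice. A reduced sketch of just the splitting step:

use bytes::Bytes;
use std::collections::VecDeque;

// Split a flat [k1, v1, k2, v2, ...] list into owned keys and values.
fn split_pairs(key_values: VecDeque<Bytes>) -> Result<(Vec<Bytes>, Vec<Bytes>), &'static str> {
    if key_values.len() % 2 == 1 {
        return Err("wrong number of arguments");
    }
    let mut keys = Vec::with_capacity(key_values.len() / 2);
    let mut values = Vec::with_capacity(key_values.len() / 2);
    for (i, item) in key_values.into_iter().enumerate() {
        if i % 2 == 0 {
            keys.push(item);
        } else {
            values.push(item);
        }
    }
    Ok((keys, values))
}

fn main() {
    let args: VecDeque<Bytes> =
        vec![Bytes::from("k1"), Bytes::from("v1"), Bytes::from("k2"), Bytes::from("v2")].into();
    let (keys, values) = split_pairs(args).unwrap();
    assert_eq!(keys, vec![Bytes::from("k1"), Bytes::from("k2")]);
    assert_eq!(values, vec![Bytes::from("v1"), Bytes::from("v2")]);
}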
@@ -906,27 +935,27 @@ impl Db {
     }
 
     /// Set a key, value with an optional expiration time
-    pub fn set(&self, key: &Bytes, value: Value, expires_in: Option<Duration>) -> Value {
+    pub fn set(&self, key: Bytes, value: Value, expires_in: Option<Duration>) -> Value {
         self.set_advanced(key, value, expires_in, Default::default(), false, false)
     }
 
     /// Set a value in the database with various settings
     pub fn set_advanced(
         &self,
-        key: &Bytes,
+        key: Bytes,
         value: Value,
         expires_in: Option<Duration>,
         override_value: Override,
         keep_ttl: bool,
         return_previous: bool,
     ) -> Value {
-        let mut slot = self.slots[self.get_slot(key)].write();
+        let mut slot = self.slots[self.get_slot(&key)].write();
         let expires_at = expires_in.map(|duration| {
             Instant::now()
                 .checked_add(duration)
                 .unwrap_or_else(far_future)
         });
-        let previous = slot.get(key).filter(|x| x.is_valid());
+        let previous = slot.get(&key).filter(|x| x.is_valid());
 
         let expires_at = if keep_ttl {
             if let Some(previous) = previous {
@@ -973,14 +1002,14 @@ impl Db {
         };
 
         if let Some(expires_at) = expires_at {
-            self.expirations.lock().add(key, expires_at);
+            self.expirations.lock().add(&key, expires_at);
         } else {
             /// Make sure to remove the new key (or replaced) from the
             /// expiration table (from any possible past value).
-            self.expirations.lock().remove(key);
+            self.expirations.lock().remove(&key);
         }
 
-        slot.insert(key.clone(), Entry::new(value, expires_at));
+        slot.insert(key, Entry::new(value, expires_at));
 
         if let Some(to_return) = to_return {
             to_return
@@ -1037,7 +1066,7 @@ impl scan::Scan for Db {
     fn scan(
         &self,
         cursor: Cursor,
-        pattern: Option<&Bytes>,
+        pattern: Option<Bytes>,
         count: Option<usize>,
         typ: Option<Typ>,
     ) -> Result<scan::Result, Error> {
@@ -1046,7 +1075,7 @@ impl scan::Scan for Db {
         let mut last_pos = cursor.last_position as usize;
         let pattern = pattern
             .map(|pattern| {
-                let pattern = String::from_utf8_lossy(pattern);
+                let pattern = String::from_utf8_lossy(&pattern);
                 Pattern::new(&pattern).map_err(|_| Error::InvalidPattern(pattern.to_string()))
             })
             .transpose()?;
@@ -1113,7 +1142,7 @@ mod test {
     #[test]
     fn incr_wrong_type() {
         let db = Db::new(100);
-        db.set(&bytes!(b"num"), Value::Blob(bytes!("some string")), None);
+        db.set(bytes!(b"num"), Value::Blob(bytes!("some string")), None);
 
         let r = db.incr(&bytes!("num"), 1);
 
@@ -1125,7 +1154,7 @@ mod test {
     #[test]
     fn incr_blob_float() {
         let db = Db::new(100);
-        db.set(&bytes!(b"num"), Value::Blob(bytes!("1.1")), None);
+        db.set(bytes!(b"num"), Value::Blob(bytes!("1.1")), None);
 
         assert_eq!(Ok(2.2.into()), db.incr::<Float>(&bytes!("num"), 1.1.into()));
         assert_eq!(Value::Blob(bytes!("2.2")), db.get(&bytes!("num")));
@@ -1134,7 +1163,7 @@ mod test {
     #[test]
     fn incr_blob_int_float() {
         let db = Db::new(100);
-        db.set(&bytes!(b"num"), Value::Blob(bytes!("1")), None);
+        db.set(bytes!(b"num"), Value::Blob(bytes!("1")), None);
 
         assert_eq!(Ok(2.1.into()), db.incr::<Float>(&bytes!("num"), 1.1.into()));
         assert_eq!(Value::Blob(bytes!("2.1")), db.get(&bytes!("num")));
@@ -1143,7 +1172,7 @@ mod test {
     #[test]
     fn incr_blob_int() {
         let db = Db::new(100);
-        db.set(&bytes!(b"num"), Value::Blob(bytes!("1")), None);
+        db.set(bytes!(b"num"), Value::Blob(bytes!("1")), None);
 
         assert_eq!(Ok(2), db.incr(&bytes!("num"), 1));
         assert_eq!(Value::Blob(bytes!("2")), db.get(&bytes!("num")));
@@ -1166,13 +1195,9 @@ mod test {
     #[test]
     fn del() {
         let db = Db::new(100);
-        db.set(&bytes!(b"expired"), Value::Ok, Some(Duration::from_secs(0)));
-        db.set(&bytes!(b"valid"), Value::Ok, None);
-        db.set(
-            &bytes!(b"expiring"),
-            Value::Ok,
-            Some(Duration::from_secs(5)),
-        );
+        db.set(bytes!(b"expired"), Value::Ok, Some(Duration::from_secs(0)));
+        db.set(bytes!(b"valid"), Value::Ok, None);
+        db.set(bytes!(b"expiring"), Value::Ok, Some(Duration::from_secs(5)));
 
         assert_eq!(
             Value::Integer(2),
@@ -1188,13 +1213,9 @@ mod test {
     #[test]
     fn ttl() {
         let db = Db::new(100);
-        db.set(&bytes!(b"expired"), Value::Ok, Some(Duration::from_secs(0)));
-        db.set(&bytes!(b"valid"), Value::Ok, None);
-        db.set(
-            &bytes!(b"expiring"),
-            Value::Ok,
-            Some(Duration::from_secs(5)),
-        );
+        db.set(bytes!(b"expired"), Value::Ok, Some(Duration::from_secs(0)));
+        db.set(bytes!(b"valid"), Value::Ok, None);
+        db.set(bytes!(b"expiring"), Value::Ok, Some(Duration::from_secs(5)));
 
         assert_eq!(None, db.ttl(&bytes!(b"expired")));
         assert_eq!(None, db.ttl(&bytes!(b"not_existing_key")));
@@ -1208,7 +1229,7 @@ mod test {
     #[test]
     fn persist_bug() {
         let db = Db::new(100);
-        db.set(&bytes!(b"one"), Value::Ok, Some(Duration::from_secs(1)));
+        db.set(bytes!(b"one"), Value::Ok, Some(Duration::from_secs(1)));
         assert_eq!(Value::Ok, db.get(&bytes!(b"one")));
         assert!(db.is_key_in_expiration_list(&bytes!(b"one")));
         db.persist(&bytes!(b"one"));
@@ -1218,7 +1239,7 @@ mod test {
     #[test]
     fn purge_keys() {
         let db = Db::new(100);
-        db.set(&bytes!(b"one"), Value::Ok, Some(Duration::from_secs(0)));
+        db.set(bytes!(b"one"), Value::Ok, Some(Duration::from_secs(0)));
         // Expired keys should not be returned, even if they are not yet
         // removed by the purge process.
         assert_eq!(Value::Null, db.get(&bytes!(b"one")));
@@ -1233,12 +1254,12 @@ mod test {
     #[test]
     fn replace_purge_keys() {
         let db = Db::new(100);
-        db.set(&bytes!(b"one"), Value::Ok, Some(Duration::from_secs(0)));
+        db.set(bytes!(b"one"), Value::Ok, Some(Duration::from_secs(0)));
         // Expired keys should not be returned, even if they are not yet
         // removed by the purge process.
         assert_eq!(Value::Null, db.get(&bytes!(b"one")));
 
-        db.set(&bytes!(b"one"), Value::Ok, Some(Duration::from_secs(5)));
+        db.set(bytes!(b"one"), Value::Ok, Some(Duration::from_secs(5)));
         assert_eq!(Value::Ok, db.get(&bytes!(b"one")));
 
         // Purge should return 0 as the expired key has been removed already
@@ -1248,11 +1269,11 @@ mod test {
     #[test]
     fn scan_skip_expired() {
         let db = Db::new(100);
-        db.set(&bytes!(b"one"), Value::Ok, Some(Duration::from_secs(0)));
-        db.set(&bytes!(b"two"), Value::Ok, Some(Duration::from_secs(0)));
+        db.set(bytes!(b"one"), Value::Ok, Some(Duration::from_secs(0)));
+        db.set(bytes!(b"two"), Value::Ok, Some(Duration::from_secs(0)));
         for i in 0u64..20u64 {
             let key: Bytes = i.to_string().into();
-            db.set(&key, Value::Ok, None);
+            db.set(key, Value::Ok, None);
         }
         let result = db
             .scan(Cursor::from_str("0").unwrap(), None, None, None)
@@ -1276,11 +1297,11 @@ mod test {
     #[test]
     fn scan_limit() {
         let db = Db::new(10);
-        db.set(&bytes!(b"one"), Value::Ok, Some(Duration::from_secs(0)));
-        db.set(&bytes!(b"two"), Value::Ok, Some(Duration::from_secs(0)));
+        db.set(bytes!(b"one"), Value::Ok, Some(Duration::from_secs(0)));
+        db.set(bytes!(b"two"), Value::Ok, Some(Duration::from_secs(0)));
         for i in 0u64..2000u64 {
             let key: Bytes = i.to_string().into();
-            db.set(&key, Value::Ok, None);
+            db.set(key, Value::Ok, None);
         }
         let result = db
             .scan(Cursor::from_str("0").unwrap(), None, Some(2), None)
@@ -1292,16 +1313,16 @@ mod test {
     #[test]
     fn scan_filter() {
         let db = Db::new(100);
-        db.set(&bytes!(b"fone"), Value::Ok, None);
-        db.set(&bytes!(b"ftwo"), Value::Ok, None);
+        db.set(bytes!(b"fone"), Value::Ok, None);
+        db.set(bytes!(b"ftwo"), Value::Ok, None);
         for i in 0u64..20u64 {
             let key: Bytes = i.to_string().into();
-            db.set(&key, Value::Ok, None);
+            db.set(key, Value::Ok, None);
         }
         let result = db
             .scan(
                 Cursor::from_str("0").unwrap(),
-                Some(&bytes!(b"f*")),
+                Some(bytes!(b"f*")),
                 None,
                 None,
             )

+ 1 - 1
src/db/scan.rs

@@ -29,7 +29,7 @@ pub trait Scan {
     fn scan(
         &self,
         cursor: Cursor,
-        pattern: Option<&Bytes>,
+        pattern: Option<Bytes>,
         count: Option<usize>,
         typ: Option<Typ>,
     ) -> std::result::Result<Result, Error>;

+ 9 - 1
src/db/utils.rs

@@ -1,6 +1,6 @@
 use crate::error::Error;
 use bytes::Bytes;
-use std::convert::TryFrom;
+use std::convert::{TryFrom, TryInto};
 use tokio::time::{Duration, Instant};
 
 pub(crate) fn far_future() -> Instant {
@@ -47,6 +47,14 @@ pub struct ExpirationOpts {
     pub LT: bool,
 }
 
+impl TryFrom<Vec<Bytes>> for ExpirationOpts {
+    type Error = Error;
+
+    fn try_from(args: Vec<Bytes>) -> Result<Self, Self::Error> {
+        args.as_slice().try_into()
+    }
+}
+
 impl TryFrom<&[Bytes]> for ExpirationOpts {
     type Error = Error;
 

+ 12 - 5
src/dispatcher/command.rs

@@ -13,7 +13,7 @@ use crate::{
 };
 use bytes::Bytes;
 use metered::{ErrorCount, HitCount, InFlight, ResponseTime, Throughput};
-use std::convert::TryInto;
+use std::{collections::VecDeque, convert::TryInto};
 
 /// Command Flags
 #[derive(Debug, Eq, PartialEq, Clone, Copy)]
@@ -140,7 +140,7 @@ impl Command {
     }
 
     /// Returns all database keys from the command arguments
-    pub fn get_keys<'a>(&self, args: &'a [Bytes]) -> Vec<&'a Bytes> {
+    pub fn get_keys(&self, args: &VecDeque<Bytes>, includes_command: bool) -> Vec<Bytes> {
         let start = self.key_start;
         let stop = if self.key_stop > 0 {
             self.key_stop
@@ -149,13 +149,20 @@ impl Command {
         };
 
         if start == 0 {
-            return vec![];
+            return Vec::new();
         }
 
-        let mut result = vec![];
+        let mut result = Vec::new();
 
         for i in (start..stop + 1).step_by(self.key_step) {
-            result.push(&args[i as usize]);
+            result.push(
+                args[if includes_command {
+                    i as usize
+                } else {
+                    i as usize - 1
+                }]
+                .clone(),
+            );
         }
 
         result
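
The key positions in the command table are defined relative to the full command line, name included. When the dispatcher has already popped the name off the argument deque, includes_command = false shifts every index down by one. A toy version of that offset logic; key_start/key_stop/key_step are passed directly here instead of being read from the command definition:

use bytes::Bytes;
use std::collections::VecDeque;

// key_start/key_stop/key_step follow the full command line, name included.
fn get_keys(
    args: &VecDeque<Bytes>,
    includes_command: bool,
    key_start: usize,
    key_stop: usize,
    key_step: usize,
) -> Vec<Bytes> {
    let mut keys = Vec::new();
    for i in (key_start..=key_stop).step_by(key_step) {
        // Shift by one when the command name is no longer in `args`.
        let idx = if includes_command { i } else { i - 1 };
        if let Some(key) = args.get(idx) {
            keys.push(key.clone());
        }
    }
    keys
}

fn main() {
    // MSET k1 v1 k2 v2 -> keys at positions 1 and 3 (step 2) of the full line.
    let full: VecDeque<Bytes> =
        vec!["MSET", "k1", "v1", "k2", "v2"].into_iter().map(Bytes::from).collect();
    let popped: VecDeque<Bytes> = full.iter().skip(1).cloned().collect();
    assert_eq!(get_keys(&full, true, 1, 4, 2), get_keys(&popped, false, 1, 4, 2));
}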

+ 10 - 10
src/dispatcher/mod.rs

@@ -278,7 +278,7 @@ dispatcher! {
             true,
         },
         LPUSHX {
-            cmd::list::lpush,
+            cmd::list::lpushx,
             [Flag::Write Flag::DenyOom Flag::Fast],
             -3,
             1,
@@ -350,7 +350,7 @@ dispatcher! {
             true,
         },
         RPUSHX {
-            cmd::list::rpush,
+            cmd::list::rpushx,
             [Flag::Write Flag::DenyOom Flag::Fast],
             -3,
             1,
@@ -442,7 +442,7 @@ dispatcher! {
             true,
         },
         HMSET {
-            cmd::hash::hset,
+            cmd::hash::hmset,
             [Flag::Write Flag::DenyOom Flag::Fast],
             -3,
             1,
@@ -588,7 +588,7 @@ dispatcher! {
             true,
         },
         PEXPIRE {
-            cmd::key::expire,
+            cmd::key::pexpire,
             [Flag::Write Flag::Fast],
             3,
             1,
@@ -597,7 +597,7 @@ dispatcher! {
             true,
         },
         PEXPIREAT {
-            cmd::key::expire_at,
+            cmd::key::pexpire_at,
             [Flag::Write Flag::Fast],
             3,
             1,
@@ -606,7 +606,7 @@ dispatcher! {
             true,
         },
         PEXPIRETIME {
-            cmd::key::expire_time,
+            cmd::key::p_expire_time,
             [Flag::Write Flag::Fast],
             2,
             1,
@@ -615,7 +615,7 @@ dispatcher! {
             true,
         },
         PTTL {
-            cmd::key::ttl,
+            cmd::key::pttl,
             [Flag::ReadOnly Flag::Random Flag::Fast],
             2,
             1,
@@ -642,7 +642,7 @@ dispatcher! {
             true,
         },
         RENAMENX {
-            cmd::key::rename,
+            cmd::key::renamenx,
             [Flag::Write Flag::Write],
             3,
             1,
@@ -842,7 +842,7 @@ dispatcher! {
             true,
         },
         PSETEX {
-            cmd::string::setex,
+            cmd::string::psetex,
             [Flag::Write Flag::DenyOom],
             4,
             1,
@@ -992,7 +992,7 @@ dispatcher! {
             true,
         },
         PSUBSCRIBE {
-            cmd::pubsub::subscribe,
+            cmd::pubsub::psubscribe,
             [Flag::PubSub Flag::Random Flag::Loading Flag::Stale],
             -2,
             0,

+ 8 - 8
src/macros.rs

@@ -101,7 +101,7 @@ macro_rules! dispatcher {
             /// required arguments are provided. This pre-validation ensures each command handler
             /// needs less logic when reading the provided arguments.
             #[inline(always)]
-            pub fn get_handler(&self, args: &[Bytes]) -> Result<&command::Command, Error> {
+            pub fn get_handler(&self, args: &::std::collections::VecDeque<Bytes>) -> Result<&command::Command, Error> {
                 let command = String::from_utf8_lossy(&args[0]).to_uppercase();
                 let command = self.get_handler_for_command(&command)?;
                 if ! command.check_number_args(args.len()) {
@@ -117,19 +117,18 @@ macro_rules! dispatcher {
             /// required arguments are provided. This pre-validation ensures each command handler
             /// needs less logic when reading the provided arguments.
             #[inline(always)]
-            pub fn execute<'a>(&'a self, conn: &'a Connection, args: &'a [Bytes]) -> futures::future::BoxFuture<'a, Result<Value, Error>> {
+            pub fn execute<'a>(&'a self, conn: &'a Connection, mut args: std::collections::VecDeque<Bytes>) -> futures::future::BoxFuture<'a, Result<Value, Error>> {
                 async move {
-                    let command = match args.get(0) {
-                        Some(s) => Ok(String::from_utf8_lossy(s).to_uppercase()),
-                        None => Err(Error::EmptyLine),
-                    }?;
+                    let command = args.pop_front()
+                        .map(|s| String::from_utf8_lossy(&s).to_uppercase())
+                        .ok_or(Error::EmptyLine)?;
                     match command.as_str() {
                         $($(
                             stringify!($command) => {
                                 //log::info!("Command: {} -> {:?}", stringify!($command), args);
                                 let command = &self.$command;
                                     let status = conn.status();
-                                if ! command.check_number_args(args.len()) {
+                                if ! command.check_number_args(args.len()+1) {
                                     if status == ConnectionStatus::Multi {
                                         conn.fail_transaction();
                                     }
@@ -143,8 +142,9 @@ macro_rules! dispatcher {
                                     let throughput = &metrics.throughput;
 
                                     if status == ConnectionStatus::Multi && command.is_queueable() {
+                                        args.push_front(command.name().into());
+                                        conn.tx_keys(command.get_keys(&args, true));
                                         conn.queue_command(args);
-                                        conn.tx_keys(command.get_keys(args));
                                         return Ok(Value::Queued);
                                     } else if status == ConnectionStatus::FailedTx && command.is_queueable() {
                                         return Ok(Value::Queued);
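
Because the dispatcher now pops the command name before arity checking, the check compensates with args.len() + 1, and a command queued inside MULTI gets the name pushed back so EXEC can replay a complete command line. A small sketch of that bookkeeping, assuming a minimal arity rule (a negative arity means "at least"); the names here are illustrative only:

use bytes::Bytes;
use std::collections::VecDeque;

// Arity convention borrowed from Redis command tables: a positive number is an
// exact argument count (command name included), a negative one is a minimum.
fn check_number_args(arity: i32, n: usize) -> bool {
    if arity >= 0 {
        n == arity as usize
    } else {
        n >= (-arity) as usize
    }
}

fn queue_for_exec(
    mut line: VecDeque<Bytes>,
    arity: i32,
    queue: &mut Vec<VecDeque<Bytes>>,
) -> Result<(), &'static str> {
    let name = line.pop_front().ok_or("empty line")?;
    // `line` no longer contains the name, hence the +1 in the arity check.
    if !check_number_args(arity, line.len() + 1) {
        return Err("wrong number of arguments");
    }
    // Put the name back so the queued entry is a complete command line again.
    line.push_front(name);
    queue.push(line);
    Ok(())
}

fn main() {
    let mut queue = Vec::new();
    let line: VecDeque<Bytes> = vec!["SET", "k", "v"].into_iter().map(Bytes::from).collect();
    queue_for_exec(line, -3, &mut queue).unwrap();
    assert_eq!(queue[0].len(), 3);
}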

+ 7 - 7
src/server.rs

@@ -14,7 +14,7 @@ use bytes::{Buf, Bytes, BytesMut};
 use futures::{channel::mpsc::Receiver, future, SinkExt};
 use log::{info, trace, warn};
 use redis_zero_protocol_parser::{parse_server, Error as RedisError};
-use std::{io, net::SocketAddr, sync::Arc};
+use std::{collections::VecDeque, io, net::SocketAddr, sync::Arc};
 #[cfg(unix)]
 use tokio::net::UnixListener;
 use tokio::{
@@ -39,7 +39,7 @@ impl Encoder<Value> for RedisParser {
 }
 
 impl Decoder for RedisParser {
-    type Item = Vec<Bytes>;
+    type Item = VecDeque<Bytes>;
     type Error = io::Error;
 
     fn decode(&mut self, src: &mut BytesMut) -> io::Result<Option<Self::Item>> {
@@ -176,7 +176,7 @@ async fn serve_unixsocket(
 async fn execute_command(
     conn: &Connection,
     dispatcher: &Dispatcher,
-    args: &[Bytes],
+    args: VecDeque<Bytes>,
 ) -> Option<Value> {
     match dispatcher.execute(&conn, args).await {
         Ok(result) => Some(result),
@@ -199,7 +199,7 @@ async fn handle_new_connection<T: AsyncReadExt + AsyncWriteExt + Unpin, A: ToStr
     let (mut pubsub, conn) = all_connections.new_connection(default_db, addr);
     let dispatcher = all_connections.get_dispatcher();
     // Commands are being buffered when the client is blocked.
-    let mut buffered_commands: Vec<Vec<Bytes>> = vec![];
+    let mut buffered_commands: Vec<VecDeque<Bytes>> = vec![];
     trace!("New connection {}", conn.id());
 
     loop {
@@ -212,7 +212,7 @@ async fn handle_new_connection<T: AsyncReadExt + AsyncWriteExt + Unpin, A: ToStr
                 'outer: for args in buffered_commands.iter() {
                     // Client sent commands while the connection was blocked,
                     // now it is time to process them one by one
-                    match execute_command(&conn, &dispatcher, &args).await {
+                    match execute_command(&conn, &dispatcher, args.clone()).await {
                         Some(result) => if result != Value::Ignore && transport.send(result).await.is_err() {
                             break 'outer;
                         },
@@ -227,10 +227,10 @@ async fn handle_new_connection<T: AsyncReadExt + AsyncWriteExt + Unpin, A: ToStr
             result = transport.next() => match result {
                 Some(Ok(args)) => {
                         if conn.is_blocked() {
-                            buffered_commands.push(args.clone());
+                            buffered_commands.push(args);
                             continue;
                         }
-                        match execute_command(&conn, &dispatcher, &args).await {
+                        match execute_command(&conn, &dispatcher, args).await {
                             Some(result) => if result != Value::Ignore && transport.send(result).await.is_err() {
                                break;
                             },

+ 17 - 5
src/value/mod.rs

@@ -33,7 +33,12 @@ pub enum Value {
     /// Vector/Array of values
     Array(Vec<Value>),
     /// Bytes/Strings/Binary data
-    Blob(BytesMut),
+    Blob(Bytes),
+    /// Bytes/String/Binary data that has been modified by bit operations. This
+    /// is not the default state: Blob and BlobRw are the same in practice, but
+    /// for a Value to become BlobRw it must have been modified by a bit update
+    /// operation
+    BlobRw(BytesMut),
     /// String. This type does not allow new lines
     String(String),
     /// An error
@@ -76,7 +81,7 @@ impl From<VDebug> for Value {
         Value::Blob(format!(
             "Value at:0x6000004a8840 refcount:1 encoding:{} serializedlength:{} lru:13421257 lru_seconds_idle:367",
             v.encoding, v.serialize_len
-            ).as_str().into()
+            ).into()
         )
     }
 }
@@ -84,7 +89,7 @@ impl From<VDebug> for Value {
 impl Value {
     /// Creates a new Redis value from a stream of bytes
     pub fn new(value: &[u8]) -> Self {
-        Self::Blob(value.into())
+        Self::Blob(Bytes::copy_from_slice(value))
     }
 
     /// Returns the internal encoding of the redis
@@ -139,6 +144,13 @@ impl From<&Value> for Vec<u8> {
             Value::Integer(x) => format!(":{}\r\n", x).into(),
             Value::BigInteger(x) => format!("({}\r\n", x).into(),
             Value::Float(x) => format!(",{}\r\n", x).into(),
+            Value::BlobRw(x) => {
+                let s = format!("${}\r\n", x.len());
+                let mut s: BytesMut = s.as_str().as_bytes().into();
+                s.extend_from_slice(x);
+                s.extend_from_slice(b"\r\n");
+                s.to_vec()
+            }
             Value::Blob(x) => {
                 let s = format!("${}\r\n", x.len());
                 let mut s: BytesMut = s.as_str().as_bytes().into();
@@ -257,8 +269,8 @@ impl From<&Bytes> for Value {
 }
 
 impl From<&str> for Value {
-    fn from(value: &str) -> Value {
-        Value::Blob(value.as_bytes().into())
+    fn from(value: &str) -> Self {
+        Value::Blob(Bytes::copy_from_slice(value.as_bytes()))
     }
 }
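
Blob(Bytes) keeps stored strings cheaply clonable and shareable, while BlobRw(BytesMut) marks a value that has been edited in place; both serialize to the same RESP bulk string. A sketch of that encoding, with Value reduced to just the two blob variants for illustration:

use bytes::{Bytes, BytesMut};

enum Value {
    Blob(Bytes),
    BlobRw(BytesMut),
}

// RESP bulk string: "$<len>\r\n<payload>\r\n" -- identical for both variants.
fn to_resp(value: &Value) -> Vec<u8> {
    let payload: &[u8] = match value {
        Value::Blob(b) => &b[..],
        Value::BlobRw(b) => &b[..],
    };
    let mut out = format!("${}\r\n", payload.len()).into_bytes();
    out.extend_from_slice(payload);
    out.extend_from_slice(b"\r\n");
    out
}

fn main() {
    let ro = Value::Blob(Bytes::from_static(b"hello"));
    let rw = Value::BlobRw(BytesMut::from(&b"hello"[..]));
    assert_eq!(to_resp(&ro), to_resp(&rw));
    assert_eq!(to_resp(&ro), b"$5\r\nhello\r\n".to_vec());
}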