Simplest way to do Vec<&T> -> Vec<T>

Given:

v: Vec<&T>
T: Clone

What is the simplest way to do Vec<&T> -> Vec<T> ?

I tried:

v.cloned() ==> does not work

Then, I went with:

v.into_iter().cloned().collect::<Vec<_>>()

which compiles but seems overkill.

That is the way to do it. (`cloned` is an `Iterator` adapter rather than a `Vec` method, which is why the bare `v.cloned()` doesn't compile.)
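
For a concrete element type, a minimal sketch of that idiom (`to_owned_vec` is just an illustrative helper name):

pub fn to_owned_vec<T: Clone>(v: Vec<&T>) -> Vec<T> {
    // `into_iter()` yields `&T` items; `cloned()` turns them into owned `T`s.
    v.into_iter().cloned().collect()
}

fn main() {
    let (a, b) = (String::from("a"), String::from("b"));
    let refs: Vec<&String> = vec![&a, &b];
    assert_eq!(to_owned_vec(refs), vec!["a".to_string(), "b".to_string()]);
}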

You could use itertools to simplify a little: its free function cloned takes care of the into_iter() for you, and collect_vec() is slightly nicer than the turbofish. The odds crate can take you a little further with vec, so you'd have vec(cloned(v)). A simpler expression, but more dependencies and use statements.
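
A minimal sketch of the itertools version (assuming itertools as a dependency; `cloned` here is the crate's free function, and `collect_vec` comes from the `Itertools` trait):

use itertools::{cloned, Itertools};

fn main() {
    let v: Vec<&u32> = vec![&1, &2, &3];
    // `cloned` accepts any `IntoIterator` of `&T`, so no explicit `into_iter()`.
    let owned: Vec<u32> = cloned(v).collect_vec();
    assert_eq!(owned, vec![1, 2, 3]);
}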


Iterators aren't overkill for vector transformations (even the no-op transformations) - they are the idiomatic way to do this, and in many cases the compiler can understand what can be optimized away and what is necessary.


You can also use my vec-utils crate and get

use vec_utils::VecExt;

let vec: Vec<&T> = /* ... */;
let vec: Vec<T> = vec.map(T::clone);

This will even reuse the allocation if that is possible!
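
For example, a minimal usage sketch, assuming the `VecExt::map` API shown above:

use vec_utils::VecExt;

fn main() {
    let one = 1usize;
    let refs: Vec<&usize> = vec![&one, &one];
    // `&usize` and `usize` have the same size and alignment,
    // which is what allows the original buffer to be reused in place.
    let owned: Vec<usize> = refs.map(|r| *r);
    assert_eq!(owned, vec![1, 1]);
}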

@Cerber-Ursi, usually Rust can't elide allocations, so using into_iter().collect() will almost always actually allocate.


Well, if we are to create a new vector, we'll allocate anyway, right? Or do you mean that vec = vec.into_iter().collect() is not a no-op, as it seems to be, but rather a physical move?


This, almost. Rust is usually fine with vec.into_iter().collect(), but as soon as you introduce even a single no-op closure, everything falls apart.

pub fn noop(v: Vec<u8>) -> Vec<u8> {
    v.into_iter()
        .map(|x| x)
        .collect()
}

The resulting (long) asm, via the playground:
playground::noop:
	pushq	%rbp
	pushq	%r15
	pushq	%r14
	pushq	%r13
	pushq	%r12
	pushq	%rbx
	pushq	%rax
	movq	%rdi, %r14
	movq	(%rsi), %r13
	movq	8(%rsi), %r12
	movq	16(%rsi), %r15
	testq	%r15, %r15
	je	.LBB0_1
	movl	$1, %esi
	movq	%r15, %rdi
	callq	*__rust_alloc@GOTPCREL(%rip)
	testq	%rax, %rax
	je	.LBB0_21
	movq	%rax, %rbx
	leaq	(%r15,%r13), %rbp
	movq	%rax, %rcx
	movq	%r13, %rdi
	cmpq	$31, %r15
	jbe	.LBB0_4
	cmpq	%rbp, %rbx
	jae	.LBB0_12
	leaq	(%rbx,%r15), %rdx
	movq	%rbx, %rcx
	movq	%r13, %rdi
	cmpq	%rdx, %r13
	jb	.LBB0_4

.LBB0_12:
	movq	%r15, %rdi
	andq	$-32, %rdi
	leaq	-32(%rdi), %rsi
	movq	%rsi, %rdx
	shrq	$5, %rdx
	addq	$1, %rdx
	movl	%edx, %ecx
	andl	$3, %ecx
	cmpq	$96, %rsi
	jae	.LBB0_14
	xorl	%edx, %edx
	testq	%rcx, %rcx
	jne	.LBB0_17
	jmp	.LBB0_19

.LBB0_1:
	movl	$1, %ebx
	xorl	%ebp, %ebp
	testq	%r12, %r12
	jne	.LBB0_8
	jmp	.LBB0_9

.LBB0_14:
	movl	$1, %esi
	subq	%rdx, %rsi
	addq	%rcx, %rsi
	addq	$-1, %rsi
	xorl	%edx, %edx

.LBB0_15:
	movups	(%r13,%rdx), %xmm0
	movups	16(%r13,%rdx), %xmm1
	movups	%xmm0, (%rbx,%rdx)
	movups	%xmm1, 16(%rbx,%rdx)
	movups	32(%r13,%rdx), %xmm0
	movups	48(%r13,%rdx), %xmm1
	movups	%xmm0, 32(%rbx,%rdx)
	movups	%xmm1, 48(%rbx,%rdx)
	movups	64(%r13,%rdx), %xmm0
	movups	80(%r13,%rdx), %xmm1
	movups	%xmm0, 64(%rbx,%rdx)
	movups	%xmm1, 80(%rbx,%rdx)
	movups	96(%r13,%rdx), %xmm0
	movups	112(%r13,%rdx), %xmm1
	movups	%xmm0, 96(%rbx,%rdx)
	movups	%xmm1, 112(%rbx,%rdx)
	subq	$-128, %rdx
	addq	$4, %rsi
	jne	.LBB0_15
	testq	%rcx, %rcx
	je	.LBB0_19

.LBB0_17:
	addq	$16, %rdx
	negq	%rcx

.LBB0_18:
	movups	-16(%r13,%rdx), %xmm0
	movups	(%r13,%rdx), %xmm1
	movups	%xmm0, -16(%rbx,%rdx)
	movups	%xmm1, (%rbx,%rdx)
	addq	$32, %rdx
	incq	%rcx
	jne	.LBB0_18

.LBB0_19:
	cmpq	%rdi, %r15
	je	.LBB0_6
	movq	%rbx, %rcx
	addq	%rdi, %rcx
	addq	%r13, %rdi

.LBB0_4:
	movq	%r15, %rdx
	subq	%rdi, %rdx
	addq	%r13, %rdx
	xorl	%esi, %esi

.LBB0_5:
	movzbl	(%rdi,%rsi), %eax
	movb	%al, (%rcx,%rsi)
	addq	$1, %rsi
	cmpq	%rsi, %rdx
	jne	.LBB0_5

.LBB0_6:
	subq	%r13, %rbp
	testq	%r12, %r12
	je	.LBB0_9

.LBB0_8:
	movl	$1, %edx
	movq	%r13, %rdi
	movq	%r12, %rsi
	callq	*__rust_dealloc@GOTPCREL(%rip)

.LBB0_9:
	movq	%rbx, (%r14)
	movq	%r15, 8(%r14)
	movq	%rbp, 16(%r14)
	movq	%r14, %rax
	addq	$8, %rsp
	popq	%rbx
	popq	%r12
	popq	%r13
	popq	%r14
	popq	%r15
	popq	%rbp
	retq

.LBB0_21:
	movl	$1, %esi
	movq	%r15, %rdi
	callq	*alloc::alloc::handle_alloc_error@GOTPCREL(%rip)
	ud2

Here is the asm for cloning the usizes in a Vec<&usize> using vec_utils (via cargo asm):

fn vec_clone(v: Vec<&usize>) -> Vec<usize> {
    use vec_utils::VecExt;
    v.map(usize::clone)
}
The `vec_clone` asm:
vec_utils::clone:
 push    rsi
 mov     rax, rcx
 movdqu  xmm0, xmmword, ptr, [rdx]
 mov     r8, qword, ptr, [rdx, +, 16]
 test    r8, r8
 je      .LBB1_7
 movq    rdx, xmm0
 lea     rcx, [r8, -, 1]
 mov     r9d, r8d
 and     r9d, 3
 cmp     rcx, 3
 jb      .LBB1_4
 mov     rsi, r9
 sub     rsi, r8
.LBB1_3:
 mov     r10, qword, ptr, [rdx]
 mov     r11, qword, ptr, [rdx, +, 8]
 mov     rcx, qword, ptr, [r10]
 mov     qword, ptr, [rdx], rcx
 mov     rcx, qword, ptr, [r11]
 mov     qword, ptr, [rdx, +, 8], rcx
 mov     rcx, qword, ptr, [rdx, +, 16]
 mov     rcx, qword, ptr, [rcx]
 mov     qword, ptr, [rdx, +, 16], rcx
 mov     rcx, qword, ptr, [rdx, +, 24]
 mov     rcx, qword, ptr, [rcx]
 mov     qword, ptr, [rdx, +, 24], rcx
 add     rdx, 32
 add     rsi, 4
 jne     .LBB1_3
.LBB1_4:
 test    r9, r9
 je      .LBB1_7
 xor     ecx, ecx
.LBB1_6:
 mov     rsi, qword, ptr, [rdx, +, 8*rcx]
 mov     rsi, qword, ptr, [rsi]
 mov     qword, ptr, [rdx, +, 8*rcx], rsi
 add     rcx, 1
 cmp     r9, rcx
 jne     .LBB1_6
.LBB1_7:
 movdqu  xmmword, ptr, [rax], xmm0
 mov     qword, ptr, [rax, +, 16], r8
 pop     rsi
 ret

Looks like there is indeed a specialization for the provably no-op cases, yes (i.e. vec.(into_)?iter(_mut)?().collect::<Vec<_>>() and vec.extend(vec2.(into_)?iter(_mut)?())).
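
A quick way to observe the no-op case (a sketch; the buffer reuse is an optimization detail, not a documented guarantee):

fn main() {
    let v: Vec<u8> = vec![1, 2, 3];
    let before = v.as_ptr();
    // No closure in the chain, so the collect specialization can apply.
    let v2: Vec<u8> = v.into_iter().collect();
    // Passes under the current specialization; do not rely on it.
    assert_eq!(before, v2.as_ptr());
}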

