#!/bin/bash
# Generate a "static behind text" effect: random noise is layered behind a
# text-shaped mask so the masked (foreground) static and the background
# static scan in opposite directions and the text becomes visible.
set -euo pipefail

readonly X=100
readonly Y=50
readonly FRAMES=10

# Create the mask: black text on a transparent canvas.
convert \
  -background none -fill black \
  -pointsize 24 -size "${X}x${Y}" -gravity center \
  label:TEST1 mask.png

# Create per-frame pictures of static.
mkdir -p back fore out
for ((i = 1; i <= FRAMES; i++)); do
  # Background static frame.
  convert -size "${X}x${Y}" canvas: +noise Random -monochrome "back/${i}.png"
  # Foreground static frame, cut to the text shape via the mask's alpha.
  convert -size "${X}x${Y}" canvas: +noise Random -monochrome mask.png \
    -compose CopyOpacity -composite "fore/${i}.png"
done

for ((i = 1; i <= FRAMES; i++)); do
  # Layer the masked static over the background static.
  # BUG FIX: was fore/1.png, which froze the foreground on frame 1 so the
  # two layers never visibly differed; each frame must use its own overlay.
  convert "back/${i}.png" "fore/${i}.png" -gravity center -background none \
    -layers Flatten "out/${i}.png"
done

# create gif
# convert -delay 1 -loop 0 out/* out.gif

# Try video: render one wide strip of static, then crop a scanning
# ${X}x${Y} window from it — left-to-right for the background ([0],
# x=n advances per frame), right-to-left for the foreground ([2], x=iw-n).
convert -size "1920x${Y}" canvas: +noise Random -monochrome -alpha off background.png
# NOTE: -loop/-t/-framerate are per-input options. The original applied them
# only to the first -i, so inputs 2 and 3 emitted a single frame each and
# their crop expressions never advanced; every still input needs its own set.
# (image2's loop option takes an integer, hence -loop 1 rather than -loop true.)
ffmpeg -y \
  -framerate 30 -loop 1 -t 10 -i background.png \
  -framerate 30 -loop 1 -t 10 -i mask.png \
  -framerate 30 -loop 1 -t 10 -i background.png \
  -filter_complex "
    [0]crop=x=n:w=${X}:h=${Y}[bg];
    [2]crop=x=iw-n:w=${X}:h=${Y}[fg];
    [1]alphaextract[mask];
    [fg][mask]alphamerge[top];
    [bg][top]overlay=x=0:y=0" \
  -c:v libx264 out.mp4