Filters in MariaDB MaxScale intercept and modify database traffic. Use them to transform, block, or log queries, enabling fine-grained control over your database workload and security.
[MyComment]
type=filter
module=comment
inject="Comment to be injected"
[MyService]
type=service
router=readwritesplit
servers=server1
user=myuser
password=mypasswd
filters=MyComment

[IPComment]
type=filter
module=comment
inject="IP=$IP"
[MyService]
type=service
router=readwritesplit
servers=server1
user=myuser
password=mypasswd
filters=IPComment

SELECT user FROM people;

/* IP=::ffff:127.0.0.1 */SELECT user FROM people;

error
ok

2 (0b00010) Reached max_resultset_rows or max_resultset_size is logged.

[Read-Service]
type=service
router=readconnroute
cluster=MyCluster
filters=readonly

[MaxRows]
type=filter
module=maxrows
[MaxRows-Routing-Service]
type=service
...
filters=MaxRows

max_resultset_rows=1000
max_resultset_size=128Ki

MariaDB [(test)]> select * from test.t4;
ERROR 1415 (0A000): Row limit/size exceeded for query: select * from test.t4

debug=2

[MaxRows]
type=filter
module=maxrows
max_resultset_rows=10000
max_resultset_size=256000

Manage LOAD DATA INFILE operations. This filter splits large data loads into smaller chunks to prevent blocking and improve stability during bulk data ingestion.
Ensure data consistency by routing reads to the primary server after a write. This filter guarantees that a client sees its own modifications immediately.
caseextended[OptimisticTrx]
type=filter
module=optimistictrx
[MyService]
...
filters=OptimisticTrx

LOAD DATA INFILE 'S3://my-bucket/my-data.csv' INTO TABLE t1
FIELDS TERMINATED BY ',' LINES TERMINATED BY '\n';[LDI-Filter]
type=filter
module=ldi
host=s3.amazonaws.com
region=us-east-1SET @maxscale.ldi.s3_key='<my-access-key>', @maxscale.ldi.s3_secret='<my-secret-key>';LOAD DATA INFILE 'S3://my-bucket/my-data.csv' INTO TABLE t1;INSERT INTO departments VALUES ('d1234', 'NewDepartment'); -- maxscale ccr=ignorematch=.*INSERT.*
ignore=.*UPDATE.*
options=case,extended

[CCRFilter]
type=filter
module=ccrfilter
time=5

[BinlogFilter]
type=filter
module=binlogfilter
match=/customers[.]/
exclude=/[.]orders/
[BinlogServer]
type=service
router=binlogrouter
server_id=33
filters=BinlogFilter
[BinlogListener]
type=listener
service=BinlogServer
port=4000

maxctrl create filter OptimisticTrx optimistictrx
maxctrl alter service-filter MyService OptimisticTrx

[PsReuse]
type=filter
module=psreuse
[MyService]
...
filters=PsReuse

Obfuscate sensitive data in query results. This filter masks specific columns based on user roles, ensuring compliance with privacy regulations without altering the database.
ruleswarn_type_mismatchlarge_payloadprevent_function_usagerequire_fully_parsedtreat_string_arg_as_fieldcheck_user_variablescheck_unionscheck_subqueriesreplaceobfuscatewithapplies_toexemptedreloadmasking_rules.jsonENUMSETalwaysabort> SELECT name, ssn FROM person;+-------+-------------+
+ name | ssn |
+-------+-------------+
| Alice | 721-07-4426 |
| Bob | 435-22-3267 |
...+-------+-------------+
+ name | ssn |
+-------+-------------+
| Alice | XXX-XX-XXXX |
| Bob | XXX-XX-XXXX |
...CREATE TABLE cheat (revealed_ssn TEXT);
INSERT INTO cheat SELECT ssn FROM users;
SELECT revealed_ssn FROM cheat;mysql> set @@sql_mode = 'ANSI_QUOTES';
mysql> select concat("ssn") from managers;[Mask-SSN]
type=filter
module=masking
rules=...
[SomeService]
type=service
...
filters=Mask-SSNrules=/path/to/rules-filewarn_type_mismatch=alwayslarge_payload=ignoreSELECT CONCAT(masked_column) FROM tbl;prevent_function_usage=falserequire_fully_parsed=falsetreat_string_arg_as_field=falseset @a = (select ssn from customer where id = 1);check_user_variables=falseSELECT a FROM t1 UNION SELECT b FROM t2;check_unions=falseSELECT * FROM (SELECT a AS b FROM t1) AS t2;check_subqueries=false{
"rules": [ ... ]
}{
"rules": [
{
"replace": { ... },
"with": { ... },
"applies_to": [ ... ],
"exempted": [ ... ]
}
]
}{
"rules": [
{
"replace": {
"database": "db1",
"table": "person",
"column": "ssn"
},
"with": { ... },
"applies_to": [ ... ],
"exempted": [ ... ]
}
]
}SELECT ssn FROM person2;SELECT ssn FROM person UNION SELECT ssn FROM person2;SELECT ssn FROM person2 UNION SELECT ssn FROM person2;SELECT ssn FROM person UNION SELECT ssn FROM person;"replace": {
"column": "ssn",
"match": "(123)"
},
"with": {
"fill": "X#"
}"obfuscate": {
"column": "name"
}SELECT name from db1.tbl1;`
+------+
| name |
+------+
| $-~) |
+------+{
"rules": [
{
"replace": {
"column": "ssn"
},
"with": {
"value": "XXX-XX-XXXX"
},
"applies_to": [ ... ],
"exempted": [ ... ]
},
{
"replace": {
"column": "age"
},
"with": {
"fill": "*"
},
"applies_to": [ ... ],
"exempted": [ ... ]
},
{
"replace": {
"column": "creditcard"
},
"with": {
"value": "1234123412341234",
"fill": "0"
},
"applies_to": [ ... ],
"exempted": [ ... ]
}
]
}{
"rules": [
{
"replace": { ... },
"with": { ... },
"applies_to": [ "'alice'@'host'", "'bob'@'%'" ],
"exempted": [ ... ]
}
]
}{
"rules": [
{
"replace": { ... },
"with": { ... },
"applies_to": [ ... ],
"exempted": [ "'admin'" ]
}
]
}MaxScale> call command masking reload MyMaskingFilter[MyMasking]
type=filter
module=masking
warn_type_mismatch=always
large_payload=abort
rules=masking_rules.json
[MyService]
type=service
...
filters=MyMasking{
"rules": [
{
"replace": {
"column": "ssn"
},
"with": {
"value": "012345-ABCD",
"fill": "X"
}
}
]
}[Throttle]
type = filter
module = throttlefilter
max_qps = 500
throttling_duration = 60000
...
[Routing-Service]
type = service
filters = Throttle

Route queries to specific servers based on regex patterns. This filter allows you to direct traffic matching defined rules to a particular backend server by name.
idle, learn-clear, learn-append, supervise, enforcereturn-error, disconnectcollective, individual""caseextendedSELECT * FROM t WHERE f = 10
SELECT * FROM t WHERE f = 20SELECT * FROM t WHERE f > 5
SELECT * FROM t WHERE f = 10
INSERT INTO t VALUES (42)
DELETE FROM t WHERE f > 20
SELECT * FROM users WHERE username = 'input' AND password = 'input'SELECT * FROM t WHERE f > ?
SELECT * FROM t WHERE f = ?
INSERT INTO t VALUES (?)
DELETE FROM t WHERE f > ?
SELECT * FROM users WHERE username = ? AND password = ?SELECT * FROM t WHERE f > 100
SELECT * FROM t WHERE f = 42
INSERT INTO t VALUES (84)
DELETE FROM t WHERE f > 200
SELECT * FROM users WHERE username = 'joe' AND password = 'secret'# != is neither > nor =
SELECT * FROM t WHERE f != 10
# During learning only one value was inserted
INSERT INTO t VALUES (1), (2)
# During learning DELETE was always accompanied by a WHERE clause
DELETE FROM t
# An apparent SQL-injection attack does not match what was learnt.
SELECT * FROM users WHERE username = '' OR '1'='1' AND password = ''[MyFirewall]
type=filter
module=firewall

exclude_users=admin, 'super'@'192.168.02.1'

2024-11-18 08:01:47 warning: (1) [firewall] (Service); Firewall incident (user@127.0.0.1): DELETE FROM t

[MyFirewall]
type=filter
module=firewall[MyService]
type=service
router=readwritesplit
...
filters=MyFirewall

maxctrl alter filter MyFirewall mode=learn-clear
maxctrl alter filter MyFirewall mode=supervise
maxctrl alter filter MyFilter mode=enforce

[NamedServerFilter]
type=filter
module=namedserverfilter
match01=^Select.*TableOne$
target01=server2,server3
match22=^SELECT.*TableTwo$
target22=->master
[MyService]
type=service
router=readwritesplit
servers=server1,server2,server3
user=myuser
password=mypasswd
filters=NamedServerFiltermatch01=^SELECT
options=case,extendedtarget01=MyServer2source=127.0.0.1source=192.%.%.%
source=192.168.%.%
source=192.168.10.%source=192.168.21.3,192.168.10.%user=john[NamedServerFilter]
type=filter
module=namedserverfilter
match02= *from *users
target02=server2
[MyService]
type=service
router=readwritesplit
servers=server1,server2
user=myuser
password=mypasswd
filters=NamedServerFilterIdentify the most expensive queries. This filter logs the top N slowest queries passing through MaxScale helping to pinpoint performance bottlenecks.
Duplicate query traffic to multiple targets. This filter sends a copy of incoming requests to a secondary service or server useful for testing and auditing.
Control query routing using SQL comments. This filter interprets hints embedded in queries to direct traffic to specific servers, primaries, or replicas.
Rewrite or block queries using regular expressions. This versatile filter modifies SQL statements on the fly or rejects them based on pattern matching rules.
caseextendedcaseextendedcaseextended[MyLogFilter]
type=filter
module=topfilter
[Service]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=MyLogFilterfilebase=/tmp/SqlQueryLogcount=30match=select.*from.*customer.*where
exclude=where
options=case,extendedsource=127.0.0.1user=john[ProductsUpdateTop20]
type=filter
module=topfilter
count=20
match=UPDATE.*PRODUCTS.*WHERE
exclude=UPDATE.*PRODUCTS_STOCK.*WHERE
filebase=/var/logs/top/ProductsUpdate[SlowAppServer]
type=filter
module=topfilter
count=20
source=192.168.0.32
filebase=/var/logs/top/SlowAppServer[ControlAppServer]
type=filter
module=topfilter
count=20
source=192.168.0.42
filebase=/var/logs/top/ControlAppServer[App-Service]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=SlowAppServer | ControlAppServer-bash-4.1$ cat /var/logs/top/Employees-top-10.137
Top 10 longest running queries in session.
==========================================
Time (sec) | Query
-----------+-----------------------------------------------------------------
22.985 | select sum(salary), year(from_date) from salaries s, (select distinct year(from_date) as y1 from salaries) y where (makedate(y.y1, 1) between s.from_date and s.to_date) group by y.y1
5.304 | select d.dept_name as "Department", y.y1 as "Year", count(*) as "Count" from departments d, dept_emp de, (select distinct year(from_date) as y1 from dept_emp order by 1) y where d.dept_no = de.dept_no and (makedate(y.y1, 1) between de.from_date and de.to_date) group by y.y1, d.dept_name order by 1, 2
2.896 | select year(now()) - year(birth_date) as age, gender, avg(salary) as "Average Salary" from employees e, salaries s where e.emp_no = s.emp_no and ("1988-08-01" between from_date AND to_date) group by year(now()) - year(birth_date), gender order by 1,2
2.160 | select dept_name as "Department", sum(salary) / 12 as "Salary Bill" from employees e, departments d, dept_emp de, salaries s where e.emp_no = de.emp_no and de.dept_no = d.dept_no and ("1988-08-01" between de.from_date AND de.to_date) and ("1988-08-01" between s.from_date AND s.to_date) and s.emp_no = e.emp_no group by dept_name order by 1
0.845 | select dept_name as "Department", avg(year(now()) - year(birth_date)) as "Average Age", gender from employees e, departments d, dept_emp de where e.emp_no = de.emp_no and de.dept_no = d.dept_no and ("1988-08-01" between from_date AND to_date) group by dept_name, gender
0.668 | select year(hire_date) as "Hired", d.dept_name, count(*) as "Count" from employees e, departments d, dept_emp de where de.emp_no = e.emp_no and de.dept_no = d.dept_no group by d.dept_name, year(hire_date)
0.249 | select moves.n_depts As "No. of Departments", count(moves.emp_no) as "No. of Employees" from (select de1.emp_no as emp_no, count(de1.emp_no) as n_depts from dept_emp de1 group by de1.emp_no) as moves group by moves.n_depts order by 1
0.245 | select year(now()) - year(birth_date) as age, gender, count(*) as "Count" from employees group by year(now()) - year(birth_date), gender order by 1,2
0.179 | select year(hire_date) as "Hired", count(*) as "Count" from employees group by year(hire_date)
0.160 | select year(hire_date) - year(birth_date) as "Age", count(*) as Count from employees group by year(hire_date) - year(birth_date) order by 1
-----------+-----------------------------------------------------------------
Session started Wed Jun 18 18:41:03 2014
Connection from 127.0.0.1
Username massi
Total of 24 statements executed.
Total statement execution time 35.701 seconds
Average statement execution time 1.488 seconds
Total connection time 46.500 seconds
-bash-4.1$[DataMartFilter]
type=filter
module=tee
target=DataMart
[Data-Service]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=DataMartFiltermatch=/insert.*into.*order*/exclude=/select.*from.*t1/options=case,extendedsource=127.0.0.1user=john[Orders]
type=service
router=readconnroute
servers=server1, server2, server3, server4
user=massi
password=6628C50E07CCE1F0392EDEEB9D1203F3
filters=ReplicateOrders
[ReplicateOrders]
type=filter
module=tee
target=DataMart
match=insert[ ]*into[ ]*orders
[DataMart]
type=service
router=readconnroute
servers=datamartserver
user=massi
password=6628C50E07CCE1F0392EDEEB9D1203F3
filters=QLA-DataMart
[QLA-DataMart]
type=filter
module=qlafilter
options=/var/log/DataMart/InsertsLog
[Orders-Listener]
type=listener
target=Orders
port=4011
[DataMart-Listener]
type=listener
target=DataMart
port=4012# The --comments flag is needed for the command line client
mariadb --comments -u my-user -psecret -e "SELECT @@hostname -- maxscale route to server db1"-- maxscale <hint body>-- maxscale route to [master | slave | server <server name>]-- maxscale route to master-- maxscale route to slave-- maxscale route to server <server name>-- maxscale route to last-- maxscale <param>=<value>-- maxscale <hint name> prepare <hint content>-- maxscale <hint name> begin-- maxscale end-- maxscale <hint name> begin <hint content>-- maxscale begin <hint content>SELECT user FROM accounts WHERE id = ? -- maxscale route to masterPREPARE my_ps FROM 'SELECT user FROM accounts WHERE id = ?';
EXECUTE my_ps USING 123; -- maxscale route to master[ReadWriteService]
type=service
router=readwritesplit
servers=server1,server2
user=maxuser
password=maxpwd
filters=Hint
[Hint]
type=filter
module=hintfilterINSERT INTO table1 VALUES ("John","Doe",1);
SELECT * FROM table1; -- maxscale route to master[MyRegexFilter]
type=filter
module=regexfilter
match=some string
replace=replacement string
[MyService]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=MyRegexfiltermatch=TYPE[ ]*=
options=casereplace=ENGINE =source=127.0.0.1user=johnlog_file=/tmp/regexfilter.loglog_trace=true[CreateTableFilter]
type=filter
module=regexfilter
options=ignorecase
match=TYPE\s*=
replace=ENGINE=
[MyService]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=CreateTableFilterTransform SQL queries using template-based rules. This filter modifies incoming statements to optimize performance or fix compatibility issues without changing application code.
%%
# options
regex_grammar: Native
case_sensitive: true
what_if: false
continue_if_matched: false
ignore_whitespace: true
%
# match template
@{1:^}select @{2} from my_table where id = @{3}
%
# replace template
select @{2} from my_table where id = @{3}%%
# use default options by leaving this blank
%
@{1:^}select count(distinct @{2}) from @{3}
%
select count(*) from (select distinct @{1} from @{2}) as t123
Input: select count(distinct author) from books where entity != "AI"
Rewritten: select count(*) from (select distinct author from books where entity != "AI") as t123%%
%
from mytable
%
from mytable force index (myindex)
Input: select name from mytable where id=42
Rewritten: select name from mytable force index (myindex) where id=42%%
%
@{1:^}select @{2} from mytable
%
select @{2} from mytable force index (myindex)%%
%
@{1:^}select zip_code from address_book where str_id = @{1:["]}@{2:[[:digit:]]+}@{3:["]}
%
select zip_code from address_book where id = @{2}
Input: select zip_code from address_book where str_id = "1234"
Rewritten: select zip_code from address_book where id = 1234%%
regex_grammar: EPosix
continue_if_matched: true
%
wrong_table_name
%
correct_table_name
%%
regex_grammar: EPosix
%
wrong_column_name
%
correct_column_name[Rewrite]
type = filter
module = rewritefilter
template_file = /path/to/template_file.rf
...
[Router]
type=service
...
filters=Rewrite%%
options
%
match template
%
replace templatecase_sensitive: true%%
case_sensitive: false
%
@{1:^}select @{2}
from mytable
where user = @{3}
%
select @{2} from mytable where user = @{3}
and @{3} in (select user from approved_users){ "templates" :
[
{
"case_sensitive" : false,
"match_template" : "@{1:^}select @{2} from mytable where user = @{3}",
"replace_template" : "select @{2} from mytable where user = @{3}
and @{3} in (select user from approved_users)"
}
]
}

maxctrl alter filter Rewrite log_replacement=false

Log every SQL query passing through MaxScale. This filter provides comprehensive audit trails by recording query text, execution time, and client details.
filebasematchexcludeoptionsusersourceuser_matchuser_excludesource_matchsource_excludelog_typelog_dataduration_unituse_canonical_formflushappendseparatornewline_replacementignorecaseextendedunifiedstdoutsessiondateuserreply_timetotal_reply_timequerydefault_dbnum_rowsreply_sizetransactiontransaction_timenum_warningserror_msg[MyLogFilter]
type=filter
module=qlafilter
filebase=/tmp/SqlQueryLog
[MyService]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=MyLogFilterfilebase=/tmp/SqlQueryLoguser_match=/(^alice$)|(^bob$)/user_exclude=/(^alice$)|(^bob$)/source_match=/(^127[.]0[.]0[.]1)|(^192[.]168[.]0[.]109)/source_exclude=/(^127[.]0[.]0[.]1)|(^192[.]168[.]0[.]109)/MariaDB [test]> select secret from T where x password="clear text pwd";
ERROR 1064 (42000): You have an error in your SQL syntax; check the manual
that corresponds to your MariaDB server version for the right syntax to
use near 'password="clear text pwd"' at line 1newline_replacement=" NL "[ProductsSelectLogger]
type=filter
module=qlafilter
match=SELECT.*from.*PRODUCTS .*
exclude=WHERE.*PRODUCT_ID.*
filebase=/var/logs/qla/SelectProducts
[Product-Service]
type=service
router=readconnroute
servers=server1
user=myuser
password=mypasswd
filters=ProductsSelectLogger07:12:56.324 7/01/2016, SELECT * FROM PRODUCTSRecord and replay real database traffic. This filter captures client sessions to create realistic benchmarks and test environments based on production workloads.
start <filter> [options]stop <filter>capture_dirstart_capturecapture_durationcapture_sizemaxctrl create filter CAPTURE_FLTR wcar
maxctrl link service RWS-Router CAPTURE_FLTRmaxctrl call command wcar start CAPTURE_FLTR <options>maxctrl call command wcar stop CAPTURE_FLTRmaxctrl unlink service RWS-Router CAPTURE_FLTR
maxctrl destroy filter CAPTURE_FLTR[CAPTURE_FLTR]
type=filter
module=wcar
capture_duration=1h # Limit capture duration to one hour
capture_size=1Gi # Limit capture size to 1GiB
start_capture=true # Start capturing immediately after starting MaxScale[server1]
type=server
address=127.0.0.1
port=3306
[MariaDB-Monitor]
type=monitor
module=mariadbmon
servers=server1
user=maxuser
password=maxpwd
[CAPTURE_FLTR]
type=filter
module=wcar
capture_duration=1h # Limit capture duration to one hour
capture_size=1Gi # Limit capture size to 1GiB
start_capture=true # Start capturing immediately after starting MaxScale
[RWS-Router]
type=service
router=readwritesplit
cluster=MariaDB-Monitor
user=maxuser
password=maxpwd
filters=CAPTURE_FLTR
[RWS-Listener]
type=listener
service=RWS-Router
protocol=MariaDBClient
port=4006maxctrl call command wcar stop CAPTURE_FLTRtar -caf captures.tar.gz -C /var/lib/maxscale wcarmaxctrl call command wcar <command> <filter> [options]maxctrl call command wcar start CAPTURE_FLTR prefix=Scenario1 size=10Gmaxctrl call command wcar stop CAPTURE_FLTRmaxplayer summary /path/to/capture.cxCREATE USER 'maxreplay'@'%' IDENTIFIED BY 'replay-pw';
GRANT ALL ON *.* TO 'maxreplay'@'%';cd /path/to/capture-dir
tar -xaf captures.tar.gzmaxplayer replay --user maxreplay --password replay-pw --host <host:port> --output baseline-result.csv /path/to/capture.cxmaxplayer replay --user maxreplay --password replay-pw --host <host:port> --output comparison-result.csv /path/to/capture.cxmaxplayer canonicals /path/to/capture.cx > canonicals.csvmaxpostprocess canonicals.csv baseline-result.csv -o baseline-summary.json
maxpostprocess canonicals.csv comparison-result.csv -o comparison-summary.jsonmaxvisualize baseline-summary.json comparison-summary.jsonmaxvisualize baseline-summary.json comparison-summary.json --Voila.ip='0.0.0.0'maxplayer -u user -p pwd --speed 1.5 -i 5s -o baseline.csv capture_2024-09-06_090002.cx --help
Usage: maxplayer [OPTION]... [COMMAND] FILE
Commands: (default: replay)
summary Show a summary of the capture.
replay Replay the capture.
convert Converts the input file (either .cx or .rx) to a replay file (.rx or .csv).
canonicals List the canonical forms of the captured SQL as CSV.
dump-data Dump capture data as SQL.
show Show the SQL of one or more events.
Options:
--user User name for login to the replay server.
-u This version does not support using the actual user names
that were used during capture.
--password Only clear text passwords are supported as of yet.
-p
--host The address of the replay server in <IP>:<port> format.
-h E.g. 127.0.0.1:4006
--output The name of the output file: e.g. baseline.csv.
-o
--report Periodically report statistics of ongoing operations.
-r The option takes a duration, such as 10s.
--report-file The --report option by default writes to stdout.
-R Provide the name of the file to write to. The file will
be truncated every time it is written to, allowing for a
simple status window by running 'watch cat <path-to-file>'
in a terminal.
--speed The value is a multiplier. 2.5 is 2.5x speed and 0.5 is half speed.
-s A value of zero means no limit, or replay as fast as possible.
A multiplier of 2.5 might not have any effect as the actual time spent
depends on many factors, such as the captured volume and replay server.
--idle-wait Relates to playback speed, and can be used together with --speed.
-i During capture there can be long delays where there is no traffic.
One hour of no capture traffic would mean replay waits for one hour.
idle-wait allows to move simulation time forwards when such gaps
occur. A 'gap' starts when all prior queries have fully executed.
--idle-wait takes a duration value. A negative value turns the feature off,
i.e. the one hour wait would happen.
--idle-wait 0s means time moves to the event start-time immediately
when a gap is detected, i.e., all gaps are skipped over.
--idle-wait 10s means time moves to the event start-time 10 seconds
(wall time) after the gap was detected. Shorter
gaps than 10 seconds will thus be fully waited for.
--idle-wait has a default value of 1 second.
Examples: 1h, 60m, 3600s, 3600000ms, which all define the same duration.
--query-filter Options: none, write-only, read-only. Default: none.
-f Replay can optionally apply only writes or only reads. This option is useful
once the databases to be tested have been prepared (see full documentation)
and optionally either a write-only run, or a full replay has been run.
Now multiple read-only runs against the server(s) are simple as no further
data synchronization is needed.
Note that this mode has its limitations as the query results may
be very different than what they were during capture.
--analyze Enabling this option will track the server Rows_read statistic for each query.
-A This will slow down the overall replay time. The query time measurements
are still valid, but currently this option should only be used when
it is of real value to know how many rows the server read for each query.
--verbose Verbose output. The option can be repeated for more verbosity: -vvv
-v
--version Display the version number and copyrights.
-V
input file: capture_2024-09-06_090002.cx
-h --help true
-u --user user
-p --password pwd
-H --host 127.1.1.0:3306
-o --output baseline.csv
-r --report 0ns
-R --report-file
-s --speed 1.5
-i --idle-wait 5s
-f --query-filter none
-A --analyze false
-v --verbose 0
-V --version 0.2Improve query performance by caching SELECT statement results. This filter stores result sets in memory, serving identical subsequent queries directly from MaxScale.
information_schemastoragestorage_optionshard_ttlsoft_ttlmax_resultset_rowsmax_resultset_sizemax_countmax_sizerulescached_dataselectscache_in_transactionsdebugenabledinvalidateclear_cache_on_parse_errorsuserstimeout@maxscale.cache.populate@maxscale.cache.use@maxscale.cache.soft_ttl@maxscale.cache.hard_ttlcached_data=thread_specificcache_data=sharedstorage_inmemorystorage_memcachedservermax_value_sizestorage_redisserverusernamepasswordsslssl_certssl_keyssl_cacache_rules.jsonthread_specificverify_cacheableFOUND_ROWSGET_LOCKIS_FREE_LOCKIS_USED_LOCKLAST_INSERT_IDLOAD_FILELOCALTIMELOCALTIMESTAMPMASTER_POS_WAITNOWRANDRELEASE_LOCKSESSION_USERSLEEPSYSDATESYSTEM_USERUNIX_TIMESTAMPUSERUUIDUUID_SHORTread_only_transactionsall_transactionsall_transactions: The cache will be used and populated inside explicitly read-only transactions. Inside transactions that are not explicitly read-only, the cache will be used and populated until the first non-SELECT statement.20b00010currentisolatedunlikeSELECT * FROM t WHERE a=1;INSERT INTO t SET a=42;Timeline 1 Timeline 2
Clients execute INSERT ... SELECT COUNT(*) FROM tbl
MaxScale -> DB SELECT COUNT(*) FROM tbl
MaxScale -> DB INSERT ...MaxScale -> Cache Delete invalidated values
MaxScale -> Cache Store result and invalidation key[Cache]
type=filter
module=cache
hard_ttl=30
soft_ttl=20
rules=...
...
[Cached-Routing-Service]
type=service
...
filters=Cachestorage=storage_redishard_ttl=60ssoft_ttl=60smax_resultset_rows=1000max_resultset_size=128Kimax_count=1000max_size=100Mirules=/path/to/rules-filemaxctrl alter filter MyCache rules='/path/to/rules-file'cached_data=sharedselects=verify_cacheablecache_in_transactions=neverdebug=31enabled=false* `never`: No invalidation is performed. This is the default.
* `current`: When a modification is made, entries in the cache used by
the current session are invalidated. Other sessions that use the same
cache will also be affected, but sessions that use another cache will
not.* `mixed`: The data of different users is stored in the same
cache. This is the default and may cause that a user can
access data he should not have access to.
* `isolated`: Each user has a unique cache and there can be
no unintended sharing.timeout=7000msSET @maxscale.cache.populate=TRUE;
SELECT a, b FROM tbl;
SET @maxscale.cache.populate=FALSE;
SELECT a, b FROM tbl;SELECT @maxscale.cache.populate;SET @maxscale.cache.use=TRUE;
SELECT a, b FROM tbl;
SET @maxscale.cache.use=FALSE;
SELECT a, b FROM tbl;SELECT @maxscale.cache.use;SET @maxscale.cache.soft_ttl=600;
SELECT a, b FROM unimportant;
SET @maxscale.cache.soft_ttl=60;
SELECT c, d FROM important;SELECT @maxscale.cache.soft_ttl;SET @maxscale.cache.soft_ttl=600, @maxscale.cache.hard_ttl=610;
SELECT a, b FROM unimportant;
SET @maxscale.cache.soft_ttl=60, @maxscale.cache.hard_ttl=65;
SELECT c, d FROM important;SELECT @maxscale.cache.hard_ttl;[TheCache]
type=filter
module=cache
enabled=falseSET @maxscale.cache.populate=TRUE;
SELECT a, b FROM tbl1;
SELECT c, d FROM tbl2;
SELECT e, f FROM tbl3;
SET @maxscale.cache.populate=FALSE;SET @maxscale.cache.use=TRUE;
SELECT a, b FROM tbl1;
SET @maxscale.cache.use=FALSE;UPDATE tbl1 SET a = ...;
SET @maxscale.cache.populate=TRUE;
SELECT a, b FROM tbl1;
SET @maxscale.cache.populate=FALSE;{
store: [ ... ],
use: [ ... ]
}[
{
store: [ ... ],
use: [ ... ]
},
{ ... }
]{
"attribute": <string>,
"op": <string>,
"value": <string>
}SELECT * FROM db1.tblUSE db1;
SELECT * FROM tblSELECT * FROM tbl WHERE a = 2 AND b = 3;SELECT * FROM tbl WHERE b = 3 AND a = 2;SELECT a FROM tbl;SELECT b FROM tbl WHERE a > 5;USE somedb;
SELECT fld FROM tbl;{
"store": [
{
"attribute": "database",
"op": "=",
"value": "db1"
}
]
}{
"store": [
{
"attribute": "table",
"op": "!=",
"value": "tbl1"
}
]
}{
"store": [
{
"attribute": "table",
"op": "!=",
"value": "db1.tbl1"
}
]
}{
"store": [
{
"attribute": "query",
"op": "like",
"value": ".*WHERE.*"
}
]
}{
"attribute": <string>,
"op": <string>,
"value": <string>
}{
"attribute": "user",
"op": "=",
"value": "'bob'@'%'"
}
{
"attribute": "user",
"op": "like",
"value": "bob@.*"
}{
"use": [
{
"attribute": "user",
"op": "!=",
"value": "admin"
}
]
}MySQL [testdb]> select * from access;
ERROR 1142 (42000): SELECT command denied to user 'bob'@'localhost' for table 'access'{
"store": [
{
"attribute": "table",
"op": "=",
"value": "access"
}
]
}MySQL [testdb]> select * from access;
+------+------+
| a | b |
+------+------+
| 47 | 11 |
+------+------+MySQL [testdb]> select current_user();
+----------------+
| current_user() |
+----------------+
| bob@127.0.0.1 |
+----------------+
1 row in set (0.00 sec)
MySQL [testdb]> select * from access;
+------+------+
| a | b |
+------+------+
| 47 | 11 |
+------+------+{
"store": [
{
"attribute": "table",
"op": "=",
"value": "access"
}
],
"use": [
{
"attribute": "user",
"op": "=",
"value": "'alice'@'%'"
}
]
}storage=storage_inmemorystorage=storage_memcached[Cache-Filter]
type=filter
module=cache
storage=storage_memcached
storage_memcached.server=192.168.1.31
storage_memcached.max_value_size=10Mstorage_options="server=192.168.1.31,max_value_size=10M"storage=storage_redis[Cache-Filter]
type=filter
module=cache
storage=storage_redis
storage_redis.server=192.168.1.31
storage_redis.username=hello
storage_redis.password=worldstorage_options="server=192.168.1.31,username=hello,password=world"$ redis-cli flushall[MyCache]
type=filter
module=cache
storage=storage_inmemory
soft_ttl=30
hard_ttl=45
cached_data=shared
max_size=50Mi
rules=cache_rules.json
[MyService]
type=service
...
filters=MyCache{
"store": [
{
"attribute": "table",
"op": "=",
"value": "sbtest"
}
]
}{
"store": [
{
"attribute": "database",
"op": "=",
"value": "db1"
}
]
}{
"store": [
{
"attribute": "query",
"op": "like",
"value": "FROM db1\\..*"
}
]
}{
"attribute": "query",
"op": "unlike",
"value": "FROM nomatch"
}{
"attribute": "database",
"op": "!=",
"value": "nomatch"
}